diff --git a/.docker-env.sample b/.docker-env.sample
deleted file mode 100644
index 63d11429..00000000
--- a/.docker-env.sample
+++ /dev/null
@@ -1,2 +0,0 @@
-DOCKER_HOST="tcp://192.168.2.2:2375"
-DOCKER_TLS_VERIFY=
diff --git a/.dockerignore b/.dockerignore
index b6091a10..13d53d0d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,16 +1,16 @@
 .DS_Store
-.git
 .idea
-.vendor
-.dockerignore
 bin
-gopath
-tmp
 state
 build
+images/*/build
+scripts/images/*/dist/
 dist
-Godeps/_workspace/pkg
 tests/integration/.venv*
 tests/integration/.tox
 */*/*/*.pyc
 */*/*/__pycache__
+.trash-cache
+.dapper
+vendor/*/*/*/.git
+tmp
diff --git a/.dockerignore.dapper b/.dockerignore.dapper
deleted file mode 100644
index c8b6127d..00000000
--- a/.dockerignore.dapper
+++ /dev/null
@@ -1,15 +0,0 @@
-.DS_Store
-.git
-.idea
-.vendor
-.dockerignore
-bin
-gopath
-tmp
-state
-build
-Godeps/_workspace/pkg
-tests/integration/.venv*
-tests/integration/.tox
-*/*/*/*.pyc
-*/*/*/__pycache__
diff --git a/.drone.yml b/.drone.yml
index 7b616e71..9a8850fc 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -3,4 +3,4 @@ build:
   volumes:
     - /var/run/docker.sock:/var/run/docker.sock
   commands:
-    - ./scripts/ci
+    - dapper ci
diff --git a/.gitignore b/.gitignore
index 0d2534cd..b8d58c6e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,11 +5,14 @@
 /build
 /dist
 /gopath
+/images/*/build
 .dockerfile
 *.swp
 /tests/integration/MANIFEST
 /tests/integration/.venv*
 /tests/integration/.tox
+/tests/integration/.idea
 *.pyc
-__pychache__
-.docker-env.*
+__pycache__
+/.dapper
+/.trash-cache
diff --git a/.package b/.package
deleted file mode 100644
index 75529c3b..00000000
--- a/.package
+++ /dev/null
@@ -1 +0,0 @@
-github.com/rancher/os
diff --git a/Dockerfile.amd64 b/Dockerfile.amd64
deleted file mode 100644
index 5171e98f..00000000
--- a/Dockerfile.amd64
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM debian:jessie
-ENV DEBIAN_FRONTEND noninteractive
-RUN apt-get update && apt-get install -y grub2 parted kexec-tools
-
-COPY ./scripts/installer /scripts
-COPY ./build.conf /scripts/
-
-COPY ./dist/artifacts/vmlinuz /dist/vmlinuz
-COPY ./dist/artifacts/initrd /dist/initrd
-
-ENTRYPOINT ["/scripts/lay-down-os"]
diff --git a/Dockerfile.dapper b/Dockerfile.dapper
index c3e28c47..1ce0e21c 100644
--- a/Dockerfile.dapper
+++ b/Dockerfile.dapper
@@ -1,45 +1,176 @@
-FROM rancher/os-dapper-base
-
-RUN apt-get update && \
-    apt-get -y install locales sudo vim less curl wget git rsync build-essential isolinux xorriso gccgo uuid \
-    libblkid-dev libmount-dev libselinux1-dev cpio genisoimage qemu-kvm qemu python-pip ca-certificates pkg-config tox module-init-tools
-
-ARG HOST_ARCH
-ENV HOST_ARCH ${HOST_ARCH}
-RUN ln -sf go-6 /usr/bin/go && mkdir -p /usr/local && cd /usr/local && \
-    wget -O - https://storage.googleapis.com/golang/go1.6.2.src.tar.gz | tar -xz && \
-    cd go/src && GOROOT_BOOTSTRAP=/usr GOARCH=${HOST_ARCH} GOHOSTARCH=${HOST_ARCH} ./make.bash
-
-ENV PATH /usr/local/go/bin:$PATH
-RUN mkdir -p /go/src /go/bin && chmod -R 777 /go
-ENV GOPATH /go
-ENV PATH /go/bin:$PATH
-
-ARG HOST_DOCKER_BINARY_URL
-ENV HOST_DOCKER_BINARY_URL ${HOST_DOCKER_BINARY_URL}
-RUN wget -O - ${HOST_DOCKER_BINARY_URL} > /usr/local/bin/docker
-RUN chmod +x /usr/local/bin/docker
+FROM ubuntu:16.04
+# FROM arm64=aarch64/ubuntu:16.04 arm=armhf/ubuntu:16.04
 
+ENV DAPPER_ENV VERSION DEV_BUILD
 ENV DAPPER_DOCKER_SOCKET true
 ENV DAPPER_SOURCE /go/src/github.com/rancher/os
-ENV DAPPER_OUTPUT ./bin ./dist ./build/os-config.yml
+ENV DAPPER_OUTPUT ./bin ./dist ./build/initrd
 ENV DAPPER_RUN_ARGS --privileged
+ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache
 ENV SHELL /bin/bash
 WORKDIR ${DAPPER_SOURCE}
 
-COPY .dockerignore.dapper .dockerignore
+########## General Configuration #####################
+ARG DAPPER_HOST_ARCH=amd64
+ARG HOST_ARCH=${DAPPER_HOST_ARCH}
+ARG ARCH=${HOST_ARCH}
 
-CMD make
+ARG OS_REPO=rancher
+ARG HOSTNAME_DEFAULT=rancher
+ARG DISTRIB_ID=RancherOS
 
-ARG TOOLCHAIN
-ENV TOOLCHAIN ${TOOLCHAIN}
+ARG DOCKER_VERSION=1.11.2
+ARG DOCKER_PATCH_VERSION=v${DOCKER_VERSION}-ros1
+ARG DOCKER_BUILD_VERSION=1.10.3
+ARG DOCKER_BUILD_PATCH_VERSION=v${DOCKER_BUILD_VERSION}-ros1
+ARG SELINUX_POLICY_URL=https://github.com/rancher/refpolicy/releases/download/v0.0.2/policy.29
 
-RUN if [ "${TOOLCHAIN}" != "" ] && ! which ${TOOLCHAIN}-gcc; then \
-    apt-get update && \
+ARG KERNEL_URL_amd64=https://github.com/rancher/os-kernel/releases/download/Ubuntu-4.4.0-23.41-rancher2/linux-4.4.10-rancher-x86.tar.gz
+ARG KERNEL_URL_arm64=https://github.com/imikushin/os-kernel/releases/download/Estuary-4.4.0-arm64.8/linux-4.4.0-rancher-arm64.tar.gz
+
+ARG DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz
+ARG DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/${DOCKER_PATCH_VERSION}/docker-${DOCKER_VERSION}_arm.tgz
+ARG DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/${DOCKER_PATCH_VERSION}/docker-${DOCKER_VERSION}_arm64.tgz
+
+ARG BUILD_DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_BUILD_VERSION}
+ARG BUILD_DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/${DOCKER_BUILD_PATCH_VERSION}/docker-${DOCKER_BUILD_VERSION}_arm
+ARG BUILD_DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/${DOCKER_BUILD_PATCH_VERSION}/docker-${DOCKER_BUILD_VERSION}_arm64
+
+ARG TOOLCHAIN_arm64=aarch64-linux-gnu
+ARG TOOLCHAIN_arm=arm-linux-gnueabihf
+
+ARG OS_RELEASES_YML=https://releases.rancher.com/os/releases.yml
+ARG VBOX_MODULES_URL_amd64=https://github.com/rancher/os-vbox/releases/download/v0.0.2/vbox-modules.tar.gz
+
+ARG OS_SERVICES_REPO=https://raw.githubusercontent.com/${OS_REPO}/os-services
+ARG IMAGE_NAME=${OS_REPO}/os
+ARG DFS_IMAGE=${OS_REPO}/docker:v${DOCKER_VERSION}
+
+ARG OS_BASE_URL_amd64=https://github.com/rancher/os-base/releases/download/v2016.05-3/os-base_amd64.tar.xz
+ARG OS_BASE_URL_arm64=https://github.com/rancher/os-base/releases/download/v2016.05-3/os-base_arm64.tar.xz
+ARG OS_BASE_URL_arm=https://github.com/rancher/os-base/releases/download/v2016.05-3/os-base_arm.tar.xz
+
+######################################################
+
+# Set up environment and export all ARGS as ENV
+ENV ARCH ${ARCH}
+ENV BUILD_DOCKER_URL BUILD_DOCKER_URL_${ARCH}
+ENV BUILD_DOCKER_URL_amd64 ${BUILD_DOCKER_URL_amd64}
+ENV BUILD_DOCKER_URL_arm ${BUILD_DOCKER_URL_arm}
+ENV BUILD_DOCKER_URL_arm64 ${BUILD_DOCKER_URL_arm64}
+ENV DAPPER_HOST_ARCH ${DAPPER_HOST_ARCH}
+ENV DFS_IMAGE ${DFS_IMAGE}
+ENV DISTRIB_ID ${DISTRIB_ID}
+ENV DOCKER_PATCH_VERSION ${DOCKER_PATCH_VERSION}
+ENV DOCKER_URL DOCKER_URL_${ARCH}
+ENV DOCKER_URL_amd64 ${DOCKER_URL_amd64}
+ENV DOCKER_URL_arm ${DOCKER_URL_arm}
+ENV DOCKER_URL_arm64 ${DOCKER_URL_arm64}
+ENV DOCKER_VERSION ${DOCKER_VERSION}
+ENV DOWNLOADS /usr/src/downloads
+ENV GOPATH /go
+ENV GO_VERSION 1.6.2
+ENV GOARCH $ARCH
+ENV HOSTNAME_DEFAULT ${HOSTNAME_DEFAULT}
+ENV HOST_ARCH ${HOST_ARCH}
+ENV IMAGE_NAME ${IMAGE_NAME}
+ENV KERNEL_URL KERNEL_URL_${ARCH}
+ENV KERNEL_URL_amd64 ${KERNEL_URL_amd64}
+ENV KERNEL_URL_arm64 ${KERNEL_URL_arm64}
+ENV OS_BASE_SHA1 OS_BASE_SHA1_${ARCH}
+ENV OS_BASE_URL OS_BASE_URL_${ARCH}
+ENV OS_BASE_URL_amd64 ${OS_BASE_URL_amd64}
+ENV OS_BASE_URL_arm ${OS_BASE_URL_arm}
+ENV OS_BASE_URL_arm64 ${OS_BASE_URL_arm64}
+ENV OS_RELEASES_YML ${OS_RELEASES_YML}
+ENV OS_REPO ${OS_REPO}
+ENV OS_SERVICES_REPO ${OS_SERVICES_REPO}
+ENV PATH ${GOPATH}/bin:/usr/local/go/bin:$PATH
+ENV REPO_VERSION master
+ENV SELINUX_POLICY_URL ${SELINUX_POLICY_URL}
+ENV TOOLCHAIN_arm ${TOOLCHAIN_arm}
+ENV TOOLCHAIN_arm64 ${TOOLCHAIN_arm64}
+ENV VBOX_MODULES_URL ${VBOX_MODULES_URL}
+ENV VBOX_MODULES_URL VBOX_MODULES_URL_${ARCH}
+ENV VBOX_MODULES_URL_amd64 ${VBOX_MODULES_URL_amd64}
+
+RUN mkdir -p ${DOWNLOADS}
+
+RUN apt-get update && \
+    apt-get install -y \
+        build-essential \
+        ca-certificates \
+        cpio \
+        curl \
+        dosfstools \
+        gccgo \
+        genisoimage \
+        git \
+        isolinux \
+        less \
+        libblkid-dev \
+        libmount-dev \
+        libselinux1-dev \
+        locales \
+        module-init-tools \
+        pkg-config \
+        python-pip \
+        qemu \
+        qemu-kvm \
+        rsync \
+        sudo \
+        tox \
+        vim \
+        wget \
+        xorriso
+
+
+# Download kernel
+RUN rm /bin/sh && ln -s /bin/bash /bin/sh
+RUN if [ -n "${!KERNEL_URL}" ]; then \
+        curl -fL ${!KERNEL_URL} > ${DOWNLOADS}/kernel.tar.gz \
+    ;fi
+
+# Download SELinux Policy
+RUN curl -pfL ${SELINUX_POLICY_URL} > ${DOWNLOADS}/$(basename ${SELINUX_POLICY_URL})
+
+# VBox URL
+RUN if [ -n "${!VBOX_MODULES_URL}" ]; then \
+        curl -pfL ${!VBOX_MODULES_URL} > ${DOWNLOADS}/vbox-modules.tar.gz \
+    ;fi
+
+# Install Go
+COPY assets/go-dnsclient.patch ${DAPPER_SOURCE}
+RUN ln -sf go-6 /usr/bin/go && \
+    curl -sfL https://storage.googleapis.com/golang/go${GO_VERSION}.src.tar.gz | tar -xzf - -C /usr/local && \
+    patch /usr/local/go/src/net/dnsclient_unix.go ${DAPPER_SOURCE}/go-dnsclient.patch && \
+    cd /usr/local/go/src && \
+    GOROOT_BOOTSTRAP=/usr GOARCH=${HOST_ARCH} GOHOSTARCH=${HOST_ARCH} ./make.bash && \
+    rm /usr/bin/go
+
+# Install Host Docker
+RUN curl -fL ${!BUILD_DOCKER_URL} > /usr/bin/docker && \
+    chmod +x /usr/bin/docker
+
+# Install Target Docker
+RUN curl -fL ${!DOCKER_URL} > ${DOWNLOADS}/docker.tgz
+
+# Install Trash
+RUN go get github.com/rancher/trash
+
+# Install dapper
+RUN curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m | sed 's/arm.*/arm/'` > /usr/bin/dapper && \
+    chmod +x /usr/bin/dapper
+
+# Install toolchain
+RUN TOOLCHAIN=TOOLCHAIN_${ARCH} && \
+    echo export TOOLCHAIN=${!TOOLCHAIN} > /usr/src/toolchain-env
+RUN source /usr/src/toolchain-env && \
+    if [ "${TOOLCHAIN}" != "" ] && ! which ${TOOLCHAIN}-gcc; then \
     apt-get install -y gcc-${TOOLCHAIN} g++-${TOOLCHAIN} \
 ;fi
 
-RUN if [ "${TOOLCHAIN}" != "" ]; then \
+RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
     apt-get update && \
     cd /usr/local/src && \
     for i in libselinux libsepol pcre3 util-linux; do \
@@ -48,7 +179,7 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
     ;done \
     ;fi
 
-RUN if [ "${TOOLCHAIN}" != "" ]; then \
+RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
     cd /usr/local/src/pcre3-* && \
     autoreconf && \
     CC=${TOOLCHAIN}-gcc CXX=${TOOLCHAIN}-g++ ./configure --host=${TOOLCHAIN} --prefix=/usr/${TOOLCHAIN} && \
@@ -56,7 +187,7 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
     make install \
     ;fi
 
-RUN if [ "${TOOLCHAIN}" != "" ]; then \
+RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
    cd /usr/local/src/libselinux-* && \
    CC=${TOOLCHAIN}-gcc CXX=${TOOLCHAIN}-g++ make CFLAGS=-Wall && \
    make PREFIX=/usr/${TOOLCHAIN} DESTDIR=/usr/${TOOLCHAIN} install && \
@@ -65,7 +196,7 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
    make PREFIX=/usr/${TOOLCHAIN} DESTDIR=/usr/${TOOLCHAIN} install \
    ;fi
 
-RUN if [ "${TOOLCHAIN}" != "" ]; then \
+RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
    cd /usr/local/src/util-linux-* && \
    autoreconf && \
    CC=${TOOLCHAIN}-gcc CXX=${TOOLCHAIN}-g++ ./configure --host=${TOOLCHAIN} --prefix=/usr/${TOOLCHAIN} \
@@ -78,5 +209,8 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
    make install \
    ;fi
 
-RUN apt-get update && \
-    apt-get -y install dosfstools
+RUN mkdir -p images/00-rootfs/build && \
+    curl -pfL ${!OS_BASE_URL} | tar xvJf - -C images/00-rootfs/build
+
+ENTRYPOINT ["./scripts/entry"]
+CMD ["ci"]
diff --git a/Makefile b/Makefile
index 263c9362..c085ff61 100644
--- a/Makefile
+++ b/Makefile
@@ -1,105 +1,39 @@
-FORCE_PULL := 0
-DEV_BUILD  := 0
-HOST_ARCH := amd64
-ARCH := amd64
-SUFFIX := $(if $(filter-out amd64,$(ARCH)),_$(ARCH))
-HOST_SUFFIX := $(if $(filter-out amd64,$(HOST_ARCH)),_$(HOST_ARCH))
+TARGETS := $(shell ls scripts | grep -vE 'clean|run|help')
 
-include build.conf
-include build.conf.$(ARCH)
+.dapper:
+	@echo Downloading dapper
+	@curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp
+	@@chmod +x .dapper.tmp
+	@./.dapper.tmp -v
+	@mv .dapper.tmp .dapper
 
+$(TARGETS): .dapper
+	./.dapper $@
 
-bin/ros:
-	mkdir -p $(dir $@)
-	ARCH=$(ARCH) VERSION=$(VERSION) ./scripts/mk-ros.sh $@
+trash: .dapper
+	./.dapper -m bind trash
 
-build/host_ros: bin/ros
-	mkdir -p $(dir $@)
-ifeq "$(ARCH)" "$(HOST_ARCH)"
-	ln -sf ../bin/ros $@
-else
-	ARCH=$(HOST_ARCH) TOOLCHAIN= VERSION=$(VERSION) ./scripts/mk-ros.sh $@
-endif
+trash-keep: .dapper
+	./.dapper -m bind trash -k
 
+deps: trash
 
-assets/docker:
-	mkdir -p $(dir $@)
-	wget -O - "$(DOCKER_BINARY_URL)" > $@
-	chmod +x $@
+build/initrd/.id:
+	dapper prepare
 
-assets/selinux/policy.29:
-	mkdir -p $(dir $@)
-	wget -O - "$(SELINUX_POLICY_URL)" > $@
+run: build/initrd/.id
+	dapper -m bind build-target
+	./scripts/run
 
-assets/modules.tar.gz:
-	mkdir -p $(dir $@)
-ifeq "$(ARCH)" "amd64"
-	curl -L "$(VBOX_MODULES_URL)" > $@
-else
-	touch $@
-endif
+shell-bind:
+	dapper -m bind -s
 
-ifdef COMPILED_KERNEL_URL
+clean:
+	@./scripts/clean
 
-installer: minimal
-	docker build -t $(IMAGE_NAME):$(VERSION)$(SUFFIX) -f Dockerfile.$(ARCH) .
+help:
+	@./scripts/help
 
-dist/artifacts/vmlinuz: build/kernel/
-	mkdir -p $(dir $@)
-	mv $(or $(wildcard build/kernel/boot/vmlinuz*), $(wildcard build/kernel/boot/vmlinux*)) $@
+.DEFAULT_GOAL := default
 
-
-build/kernel/:
-	mkdir -p $@
-	wget -O - "$(COMPILED_KERNEL_URL)" | tar -xzf - -C $@
-
-
-dist/artifacts/initrd: bin/ros assets/docker assets/selinux/policy.29 build/kernel/ build/images.tar assets/modules.tar.gz
-	mkdir -p $(dir $@)
-	HOST_SUFFIX=$(HOST_SUFFIX) SUFFIX=$(SUFFIX) DFS_IMAGE=$(DFS_IMAGE) DEV_BUILD=$(DEV_BUILD) \
-	KERNEL_RELEASE=$(KERNEL_RELEASE) ARCH=$(ARCH) ./scripts/mk-initrd.sh $@
-
-
-dist/artifacts/rancheros.iso: minimal
-	./scripts/mk-rancheros-iso.sh
-
-all: minimal installer iso
-
-initrd: dist/artifacts/initrd
-
-minimal: initrd dist/artifacts/vmlinuz
-
-iso: dist/artifacts/rancheros.iso dist/artifacts/iso-checksums.txt
-
-test: minimal
-	./scripts/unit-test
-	cd tests/integration && HOST_ARCH=$(HOST_ARCH) ARCH=$(ARCH) tox
-
-.PHONY: all minimal initrd iso installer test
-
-endif
-
-
-build/os-config.yml: build/host_ros
-	ARCH=$(ARCH) VERSION=$(VERSION) ./scripts/gen-os-config.sh $@
-
-
-build/images.tar: build/host_ros build/os-config.yml
-	ARCH=$(ARCH) FORCE_PULL=$(FORCE_PULL) ./scripts/mk-images-tar.sh
-
-
-dist/artifacts/rootfs.tar.gz: bin/ros assets/docker build/images.tar assets/selinux/policy.29 assets/modules.tar.gz
-	mkdir -p $(dir $@)
-	HOST_SUFFIX=$(HOST_SUFFIX) SUFFIX=$(SUFFIX) DFS_IMAGE=$(DFS_IMAGE) DEV_BUILD=$(DEV_BUILD) IS_ROOTFS=1 ./scripts/mk-initrd.sh $@
-
-
-dist/artifacts/iso-checksums.txt: dist/artifacts/rancheros.iso
-	./scripts/mk-iso-checksums-txt.sh
-
-
-version:
-	@echo $(VERSION)
-
-rootfs: dist/artifacts/rootfs.tar.gz
-
-.PHONY: rootfs version bin/ros
+.PHONY: $(TARGETS)
diff --git a/assets/go-dnsclient.patch b/assets/go-dnsclient.patch
new file mode 100644
index 00000000..9df6543e
--- /dev/null
+++ b/assets/go-dnsclient.patch
@@ -0,0 +1,24 @@
+265,270d264
+< 	// Ensure only one update at a time checks resolv.conf.
+< 	if !conf.tryAcquireSema() {
+< 		return
+< 	}
+< 	defer conf.releaseSema()
+<
+276a271,280
+> 	conf.update(name)
+> }
+>
+> func (conf *resolverConfig) update(name string) {
+> 	// Ensure only one update at a time checks resolv.conf.
+> 	if !conf.tryAcquireSema() {
+> 		return
+> 	}
+> 	defer conf.releaseSema()
+>
+293a298,302
+> }
+>
+> func UpdateDnsConf() {
+> 	resolvConf.initOnce.Do(resolvConf.init)
+> 	resolvConf.update("/etc/resolv.conf")
diff --git a/assets/selinux/failsafe_context b/assets/selinux/ros/contexts/failsafe_context
similarity index 100%
rename from assets/selinux/failsafe_context
rename to assets/selinux/ros/contexts/failsafe_context
diff --git a/assets/selinux/lxc_contexts b/assets/selinux/ros/contexts/lxc_contexts
similarity index 100%
rename from assets/selinux/lxc_contexts
rename to assets/selinux/ros/contexts/lxc_contexts
diff --git a/assets/selinux/seusers b/assets/selinux/ros/seusers
similarity index 100%
rename from assets/selinux/seusers
rename to assets/selinux/ros/seusers
diff --git a/build.conf b/build.conf
deleted file mode 100644
index ed81c211..00000000
--- a/build.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-IMAGE_NAME=rancher/os
-VERSION=v0.4.4-dev
-DFS_IMAGE=rancher/docker:v1.10.3
-SELINUX_POLICY_URL=https://github.com/rancher/refpolicy/releases/download/v0.0.2/policy.29
-
-HOSTNAME_DEFAULT=rancher
-OS_IMAGES_ROOT=rancher
-OS_SERVICES_REPO=https://raw.githubusercontent.com/rancher/os-services
diff --git a/build.conf.amd64 b/build.conf.amd64
deleted file mode 100644
index e7d608d3..00000000
--- a/build.conf.amd64
+++ /dev/null
@@ -1,7 +0,0 @@
-DAPPER_BASE=ubuntu:16.04
-TOOLCHAIN= #empty
-
-COMPILED_KERNEL_URL=https://github.com/rancher/os-kernel/releases/download/Ubuntu-4.2.0-34.39-rancher/linux-4.2.8-ckt4-rancher-x86.tar.gz
-DOCKER_BINARY_URL=https://get.docker.com/builds/Linux/x86_64/docker-1.10.3
-OS_RELEASES_YML=https://releases.rancher.com/os/releases.yml
-VBOX_MODULES_URL=https://github.com/rancher/os-vbox/releases/download/v0.0.2/vbox-modules.tar.gz
diff --git a/build.conf.arm b/build.conf.arm
deleted file mode 100644
index 07635241..00000000
--- a/build.conf.arm
+++ /dev/null
@@ -1,6 +0,0 @@
-DAPPER_BASE=armhf/ubuntu:16.04
-TOOLCHAIN=arm-linux-gnueabihf
-
-COMPILED_KERNEL_URL= #empty
-DOCKER_BINARY_URL=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm
-OS_RELEASES_YML=https://releases.rancher.com/os/releases_arm.yml
diff --git a/build.conf.arm64 b/build.conf.arm64
deleted file mode 100644
index f891cb2c..00000000
--- a/build.conf.arm64
+++ /dev/null
@@ -1,6 +0,0 @@
-DAPPER_BASE=aarch64/ubuntu:16.04
-TOOLCHAIN=aarch64-linux-gnu
-
-COMPILED_KERNEL_URL=https://github.com/imikushin/os-kernel/releases/download/Estuary-4.1.18-arm64-3/linux-4.1.18-arm64.tar.gz
-DOCKER_BINARY_URL=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm64
-OS_RELEASES_YML=https://releases.rancher.com/os/releases_arm64.yml
diff --git a/build.sh b/build.sh
deleted file mode 100755
index 00abad72..00000000
--- a/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-set -e
-
-export ARCH=${ARCH:-amd64}
-
-cd $(dirname $0)
-
-if [ "$1" != "--dev" ]; then
-    echo
-    echo Running \"production\" build. Will use lzma to compress initrd, which is somewhat slow...
-    echo Ctrl+C if you don\'t want this.
-    echo
-    echo For \"developer\" builds, run ./build.sh --dev
-    echo
-    ./scripts/make.sh all
-else
-    ./scripts/make.sh DEV_BUILD=1 all
-fi
-
-ls -lh dist/artifacts
diff --git a/cmd/cloudinit/cloudinit.go b/cmd/cloudinit/cloudinit.go
index e5e82e97..53804465 100644
--- a/cmd/cloudinit/cloudinit.go
+++ b/cmd/cloudinit/cloudinit.go
@@ -18,8 +18,9 @@ package cloudinit
 import (
 	"errors"
 	"flag"
-	"io/ioutil"
+	"fmt"
 	"os"
+	"os/exec"
 	"strings"
 	"sync"
 	"time"
@@ -33,6 +34,7 @@ import (
 	"github.com/coreos/coreos-cloudinit/datasource/file"
 	"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
 	"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
+	"github.com/coreos/coreos-cloudinit/datasource/metadata/gce"
 	"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
 	"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
 	"github.com/coreos/coreos-cloudinit/datasource/url"
@@ -40,6 +42,7 @@ import (
 	"github.com/coreos/coreos-cloudinit/system"
 	"github.com/rancher/netconf"
 	rancherConfig "github.com/rancher/os/config"
+	"github.com/rancher/os/util"
 )
 
 const (
@@ -47,6 +50,7 @@ const (
 	datasourceMaxInterval = 30 * time.Second
 	datasourceTimeout     = 5 * time.Minute
 	sshKeyName            = "rancheros-cloud-config"
+	resizeStamp           = "/var/lib/rancher/resizefs.done"
 )
 
 var (
@@ -71,13 +75,13 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
 
 	if len(scriptBytes) > 0 {
 		log.Infof("Writing to %s", rancherConfig.CloudConfigScriptFile)
-		if err := ioutil.WriteFile(rancherConfig.CloudConfigScriptFile, scriptBytes, 500); err != nil {
+		if err := util.WriteFileAtomic(rancherConfig.CloudConfigScriptFile, scriptBytes, 500); err != nil {
 			log.Errorf("Error while writing file %s: %v", rancherConfig.CloudConfigScriptFile, err)
 			return err
 		}
 	}
 
-	if err := ioutil.WriteFile(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 400); err != nil {
+	if err := util.WriteFileAtomic(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 400); err != nil {
 		return err
 	}
 	log.Infof("Written to %s:\n%s", rancherConfig.CloudConfigBootFile, string(cloudConfigBytes))
@@ -87,7 +91,7 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
 		return err
 	}
 
-	if err = ioutil.WriteFile(rancherConfig.MetaDataFile, metaDataBytes, 400); err != nil {
+	if err = util.WriteFileAtomic(rancherConfig.MetaDataFile, metaDataBytes, 400); err != nil {
 		return err
 	}
 	log.Infof("Written to %s:\n%s", rancherConfig.MetaDataFile, string(metaDataBytes))
@@ -96,11 +100,7 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
 }
 
 func currentDatasource() (datasource.Datasource, error) {
-	cfg, err := rancherConfig.LoadConfig()
-	if err != nil {
-		log.WithFields(log.Fields{"err": err}).Error("Failed to read rancher config")
-		return nil, err
-	}
+	cfg := rancherConfig.LoadConfig()
 
 	dss := getDatasources(cfg)
 	if len(dss) == 0 {
@@ -168,12 +168,31 @@ func fetchUserData() ([]byte, datasource.Metadata, error) {
 	return userDataBytes, metadata, nil
 }
 
-func executeCloudConfig() error {
-	cc, err := rancherConfig.LoadConfig()
+func resizeDevice(cfg *rancherConfig.CloudConfig) error {
+	cmd := exec.Command("growpart", cfg.Rancher.ResizeDevice, "1")
+	err := cmd.Run()
 	if err != nil {
 		return err
 	}
 
+	cmd = exec.Command("partprobe")
+	err = cmd.Run()
+	if err != nil {
+		return err
+	}
+
+	cmd = exec.Command("resize2fs", fmt.Sprintf("%s1", cfg.Rancher.ResizeDevice))
+	err = cmd.Run()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func executeCloudConfig() error {
+	cc := rancherConfig.LoadConfig()
+
 	if len(cc.SSHAuthorizedKeys) > 0 {
 		authorizeSSHKeys("rancher", cc.SSHAuthorizedKeys, sshKeyName)
 		authorizeSSHKeys("docker", cc.SSHAuthorizedKeys, sshKeyName)
@@ -189,6 +208,14 @@ func executeCloudConfig() error {
 		log.Printf("Wrote file %s to filesystem", fullPath)
 	}
 
+	if _, err := os.Stat(resizeStamp); os.IsNotExist(err) && cc.Rancher.ResizeDevice != "" {
+		if err := resizeDevice(cc); err == nil {
+			os.Create(resizeStamp)
+		} else {
+			log.Errorf("Failed to resize %s: %s", cc.Rancher.ResizeDevice, err)
+		}
+	}
+
 	return nil
 }
 
@@ -261,12 +288,7 @@ func getDatasources(cfg *rancherConfig.CloudConfig) []datasource.Datasource {
 			}
 		case "gce":
 			if network {
-				gceCloudConfigFile, err := GetAndCreateGceDataSourceFilename()
-				if err != nil {
-					log.Errorf("Could not retrieve GCE CloudConfig %s", err)
-					continue
-				}
-				dss = append(dss, file.NewDatasource(gceCloudConfigFile))
+				dss = append(dss, gce.NewDatasource("http://metadata.google.internal/"))
 			}
 		case "packet":
 			if !network {
diff --git a/cmd/cloudinit/gce.go b/cmd/cloudinit/gce.go
deleted file mode 100644
index c42273e5..00000000
--- a/cmd/cloudinit/gce.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package cloudinit
-
-import (
-	"io/ioutil"
-	"strings"
-
-	log "github.com/Sirupsen/logrus"
-	yaml "github.com/cloudfoundry-incubator/candiedyaml"
-	"google.golang.org/cloud/compute/metadata"
-)
-
-type GceCloudConfig struct {
-	FileName           string
-	UserData           string
-	NonUserDataSSHKeys []string
-}
-
-const (
-	gceCloudConfigFile = "/var/lib/rancher/conf/gce_cloudinit_config.yml"
-)
-
-func NewGceCloudConfig() *GceCloudConfig {
-
-	userData, err := metadata.InstanceAttributeValue("user-data")
-	if err != nil {
-		log.Errorf("Could not retrieve user-data: %s", err)
-	}
-
-	projectSSHKeys, err := metadata.ProjectAttributeValue("sshKeys")
-	if err != nil {
-		log.Errorf("Could not retrieve project SSH Keys: %s", err)
-	}
-
-	instanceSSHKeys, err := metadata.InstanceAttributeValue("sshKeys")
-	if err != nil {
-		log.Errorf("Could not retrieve instance SSH Keys: %s", err)
-	}
-
-	nonUserDataSSHKeysRaw := projectSSHKeys + "\n" + instanceSSHKeys
-	nonUserDataSSHKeys := gceSshKeyFormatter(nonUserDataSSHKeysRaw)
-
-	gceCC := &GceCloudConfig{
-		FileName:           gceCloudConfigFile,
-		UserData:           userData,
-		NonUserDataSSHKeys: nonUserDataSSHKeys,
-	}
-
-	return gceCC
-}
-
-func GetAndCreateGceDataSourceFilename() (string, error) {
-	gceCC := NewGceCloudConfig()
-	err := gceCC.saveToFile(gceCC.FileName)
-	if err != nil {
-		log.Errorf("Error: %s", err)
-		return "", err
-	}
-	return gceCC.FileName, nil
-}
-
-func (cc *GceCloudConfig) saveToFile(filename string) error {
-	//Get Merged UserData sshkeys
-	data, err := cc.getMergedUserData()
-	if err != nil {
-		log.Errorf("Could not process userdata: %s", err)
-		return err
-	}
-	//write file
-	writeFile(filename, data)
-	return nil
-}
-
-func (cc *GceCloudConfig) getMergedUserData() ([]byte, error) {
-	var returnUserData []byte
-	userdata := make(map[string]interface{})
-
-	if cc.UserData != "" {
-		log.Infof("Found UserData Config")
-		err := yaml.Unmarshal([]byte(cc.UserData), &userdata)
-		if err != nil {
-			log.Errorf("Could not unmarshal data: %s", err)
-			return nil, err
-		}
-	}
-
-	var auth_keys []string
-	if _, exists := userdata["ssh_authorized_keys"]; exists {
-		udSshKeys := userdata["ssh_authorized_keys"].([]interface{})
-		log.Infof("userdata %s", udSshKeys)
-
-		for _, value := range udSshKeys {
-			auth_keys = append(auth_keys, value.(string))
-		}
-	}
-	if cc.NonUserDataSSHKeys != nil {
-		for _, value := range cc.NonUserDataSSHKeys {
-			auth_keys = append(auth_keys, value)
-		}
-	}
-	userdata["ssh_authorized_keys"] = auth_keys
-
-	yamlUserData, err := yaml.Marshal(&userdata)
-	if err != nil {
-		log.Errorf("Could not Marshal userdata: %s", err)
-		return nil, err
-	} else {
-		returnUserData = append([]byte("#cloud-config\n"), yamlUserData...)
-	}
-
-	return returnUserData, nil
-}
-
-func writeFile(filename string, data []byte) error {
-	if err := ioutil.WriteFile(filename, data, 400); err != nil {
-		log.Errorf("Could not write file %v", err)
-		return err
-	}
-	return nil
-}
-
-func gceSshKeyFormatter(rawKeys string) []string {
-	keySlice := strings.Split(rawKeys, "\n")
-	var cloudFormatedKeys []string
-
-	if len(keySlice) > 0 {
-		for i := range keySlice {
-			keyString := keySlice[i]
-			sIdx := strings.Index(keyString, ":")
-			if sIdx != -1 {
-				key := strings.TrimSpace(keyString[sIdx+1:])
-				keyA := strings.Split(key, " ")
-				key = strings.Join(keyA, " ")
-				if key != "" {
-					cloudFormatedKeys = append(cloudFormatedKeys, key)
-				}
-			}
-		}
-	}
-	return cloudFormatedKeys
-}
diff --git a/cmd/control/cli.go b/cmd/control/cli.go
index d8c19eb2..f5d7b833 100644
--- a/cmd/control/cli.go
+++ b/cmd/control/cli.go
@@ -31,6 +31,12 @@ func Main() {
 			HideHelp:    true,
 			Subcommands: configSubcommands(),
 		},
+		{
+			Name:        "console",
+			Usage:       "console container commands",
+			HideHelp:    true,
+			Subcommands: consoleSubcommands(),
+		},
 		{
 			Name:      "dev",
 			ShortName: "d",
diff --git a/cmd/control/config.go b/cmd/control/config.go
index d2512647..7c4b15c1 100644
--- a/cmd/control/config.go
+++ b/cmd/control/config.go
@@ -14,6 +14,7 @@ import (
 
 	"github.com/codegangsta/cli"
 	"github.com/rancher/os/config"
+	"github.com/rancher/os/util"
 )
 
 func configSubcommands() []cli.Command {
@@ -28,17 +29,6 @@ func configSubcommands() []cli.Command {
 			Usage:  "set a value",
 			Action: configSet,
 		},
-		{
-			Name:   "import",
-			Usage:  "import configuration from standard in or a file",
-			Action: runImport,
-			Flags: []cli.Flag{
-				cli.StringFlag{
-					Name:  "input, i",
-					Usage: "File from which to read",
-				},
-			},
-		},
 		{
 			Name:  "images",
 			Usage: "List Docker images for a configuration from a file",
@@ -106,7 +96,7 @@ func imagesFromConfig(cfg *config.CloudConfig) []string {
 	return images
 }
 
-func runImages(c *cli.Context) {
+func runImages(c *cli.Context) error {
 	configFile := c.String("input")
 	cfg, err := config.ReadConfig(nil, false, configFile)
 	if err != nil {
@@ -114,12 +104,14 @@ func runImages(c *cli.Context) {
 	}
 	images := imagesFromConfig(cfg)
 	fmt.Println(strings.Join(images, " "))
+	return nil
 }
 
-func runGenerate(c *cli.Context) {
+func runGenerate(c *cli.Context) error {
 	if err := genTpl(os.Stdin, os.Stdout); err != nil {
 		log.Fatalf("Failed to generate config, err: '%s'", err)
 	}
+	return nil
 }
 
 func genTpl(in io.Reader, out io.Writer) error {
@@ -140,76 +132,30 @@ func env2map(env []string) map[string]string {
 	return m
 }
 
-func runImport(c *cli.Context) {
-	var input io.ReadCloser
-	var err error
-	input = os.Stdin
-	cfg, err := config.LoadConfig()
-
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	inputFile := c.String("input")
-	if inputFile != "" {
-		input, err = os.Open(inputFile)
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer input.Close()
-	}
-
-	bytes, err := ioutil.ReadAll(input)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	cfg, err = cfg.Import(bytes)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	if err := cfg.Save(); err != nil {
-		log.Fatal(err)
-	}
-}
-
-func configSet(c *cli.Context) {
+func configSet(c *cli.Context) error {
 	key := c.Args().Get(0)
 	value := c.Args().Get(1)
 	if key == "" {
-		return
+		return nil
 	}
 
-	cfg, err := config.LoadConfig()
+	err := config.Set(key, value)
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	cfgDiff, err := cfg.Set(key, value)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	if err := cfg.Save(cfgDiff); err != nil {
-		log.Fatal(err)
-	}
+	return nil
 }
 
-func configGet(c *cli.Context) {
+func configGet(c *cli.Context) error {
 	arg := c.Args().Get(0)
 	if arg == "" {
-		return
+		return nil
 	}
 
-	cfg, err := config.LoadConfig()
+	val, err := config.Get(arg)
 	if err != nil {
-		log.WithFields(log.Fields{"err": err}).Fatal("config get: failed to load config")
-	}
-
-	val, err := cfg.GetIgnoreOmitEmpty(arg)
-	if err != nil {
-		log.WithFields(log.Fields{"cfg": cfg, "key": arg, "val": val, "err": err}).Fatal("config get: failed to retrieve value")
+		log.WithFields(log.Fields{"key": arg, "val": val, "err": err}).Fatal("config get: failed to retrieve value")
 	}
 
 	printYaml := false
@@ -229,31 +175,26 @@ func configGet(c *cli.Context) {
 	} else {
 		fmt.Println(val)
 	}
+
+	return nil
 }
 
-func merge(c *cli.Context) {
+func merge(c *cli.Context) error {
 	bytes, err := ioutil.ReadAll(os.Stdin)
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	cfg, err := config.LoadConfig()
+	err = config.Merge(bytes)
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	cfg, err = cfg.MergeBytes(bytes)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	if err := cfg.Save(); err != nil {
-		log.Fatal(err)
-	}
+	return nil
 }
 
-func export(c *cli.Context) {
-	content, err := config.Dump(c.Bool("private"), c.Bool("full"))
+func export(c *cli.Context) error {
+	content, err := config.Export(c.Bool("private"), c.Bool("full"))
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -262,9 +203,11 @@ func export(c *cli.Context) {
 	if output == "" {
 		fmt.Println(content)
 	} else {
-		err := ioutil.WriteFile(output, []byte(content), 0400)
+		err := util.WriteFileAtomic(output, []byte(content), 0400)
 		if err != nil {
 			log.Fatal(err)
 		}
 	}
+
+	return nil
 }
diff --git a/cmd/control/console.go b/cmd/control/console.go
new file mode 100644
index 00000000..9bd25a51
--- /dev/null
+++ b/cmd/control/console.go
@@ -0,0 +1,128 @@
+package control
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+
+	"golang.org/x/net/context"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/codegangsta/cli"
+	composeConfig "github.com/docker/libcompose/config"
+	"github.com/docker/libcompose/project/options"
+	"github.com/rancher/os/compose"
+	"github.com/rancher/os/config"
+	"github.com/rancher/os/util/network"
+)
+
+func consoleSubcommands() []cli.Command {
+	return []cli.Command{
+		{
+			Name:   "switch",
+			Usage:  "switch console without a reboot",
+			Action: consoleSwitch,
+			Flags: []cli.Flag{
+				cli.BoolFlag{
+					Name:  "force, f",
+					Usage: "do not prompt for input",
+				},
+			},
+		},
+		{
+			Name:   "enable",
+			Usage:  "set console to be switched on next reboot",
+			Action: consoleEnable,
+		},
+		{
+			Name:   "list",
+			Usage:  "list available consoles",
+			Action: consoleList,
+		},
+	}
+}
+
+func consoleSwitch(c *cli.Context) error {
+	if len(c.Args()) != 1 {
+		log.Fatal("Must specify exactly one console to switch to")
+	}
+	newConsole := c.Args()[0]
+
+	if !c.Bool("force") {
+		in := bufio.NewReader(os.Stdin)
+		fmt.Println("Switching consoles will destroy the current console container and restart Docker.")
+		fmt.Println("Note: You will also be logged out.")
+		if !yes(in, "Continue") {
+			return nil
+		}
+	}
+
+	cfg := config.LoadConfig()
+
+	if newConsole != "default" {
+		if err := compose.StageServices(cfg, newConsole); err != nil {
+			return err
+		}
+	}
+
+	service, err := compose.CreateService(nil, "switch-console", &composeConfig.ServiceConfigV1{
+		LogDriver:  "json-file",
+		Privileged: true,
+		Net:        "host",
+		Pid:        "host",
+		Image:      fmt.Sprintf("rancher/os-base:%s", config.VERSION),
+		Labels: map[string]string{
+			config.SCOPE: config.SYSTEM,
+		},
+		Command:     []string{"/usr/bin/switch-console", newConsole},
+		VolumesFrom: []string{"all-volumes"},
+	})
+	if err != nil {
+		return err
+	}
+
+	if err = service.Delete(context.Background(), options.Delete{}); err != nil {
+		return err
+	}
+	if err = service.Up(context.Background(), options.Up{}); err != nil {
+		return err
+	}
+
+	return service.Log(context.Background(), true)
+}
+
+func consoleEnable(c *cli.Context) error {
+	if len(c.Args()) != 1 {
+		log.Fatal("Must specify exactly one console to enable")
+	}
+	newConsole := c.Args()[0]
+
+	cfg := config.LoadConfig()
+
+	if newConsole != "default" {
+		if err := compose.StageServices(cfg, newConsole); err != nil {
+			return err
+		}
+	}
+
+	if err := config.Set("rancher.console", newConsole); err != nil {
+		log.Errorf("Failed to update 'rancher.console': %v", err)
+	}
+
+	return nil
+}
+
+func consoleList(c *cli.Context) error {
+	cfg := config.LoadConfig()
+
+	consoles, err := network.GetConsoles(cfg.Rancher.Repositories.ToArray())
+	if err != nil {
+		return err
+	}
+
+	fmt.Println("default")
+	for _, console := range consoles {
+		fmt.Println(console)
+	}
+
+	return nil
+}
diff --git a/cmd/control/dev.go b/cmd/control/dev.go
index 5bade13c..b7626909 100644
--- a/cmd/control/dev.go
+++ b/cmd/control/dev.go
@@ -7,8 +7,9 @@ import (
 	"github.com/rancher/os/util"
 )
 
-func devAction(c *cli.Context) {
+func devAction(c *cli.Context) error {
 	if len(c.Args()) > 0 {
 		fmt.Println(util.ResolveDevice(c.Args()[0]))
 	}
+	return nil
 }
diff --git a/cmd/control/env.go b/cmd/control/env.go
index f1bcd47b..166c6ad3 100644
--- a/cmd/control/env.go
+++ b/cmd/control/env.go
@@ -11,15 +11,12 @@ import (
 	"github.com/rancher/os/util"
 )
 
-func envAction(c *cli.Context) {
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		log.Fatal(err)
-	}
+func envAction(c *cli.Context) error {
+	cfg := config.LoadConfig()
 
 	args := c.Args()
 	if len(args) == 0 {
-		return
+		return nil
 	}
 
 	osEnv := os.Environ()
@@ -39,4 +36,6 @@ func envAction(c *cli.Context) {
 	if err := syscall.Exec(args[0], args, util.Map2KVPairs(envMap)); err != nil {
 		log.Fatal(err)
 	}
+
+	return nil
 }
diff --git a/cmd/control/install.go b/cmd/control/install.go
index e675bdc5..dbf6374a 100644
--- a/cmd/control/install.go
+++ b/cmd/control/install.go
@@ -48,7 +48,7 @@ var installCommand = cli.Command{
 	},
 }
 
-func installAction(c *cli.Context) {
+func installAction(c *cli.Context) error {
 	if c.Args().Present() {
 		log.Fatalf("invalid arguments %v", c.Args())
 	}
@@ -58,10 +58,7 @@ func installAction(c *cli.Context) {
 	}
 
 	image := c.String("image")
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		log.WithFields(log.Fields{"err": err}).Fatal("ros install: failed to load config")
-	}
+	cfg := config.LoadConfig()
 	if image == "" {
 		image = cfg.Rancher.Upgrade.Image + ":" + config.VERSION + config.SUFFIX
 	}
@@ -89,6 +86,8 @@ func installAction(c *cli.Context) {
 	if err := runInstall(image, installType, cloudConfig, device, force, reboot); err != nil {
 		log.WithFields(log.Fields{"err": err}).Fatal("Failed to run install")
 	}
+
+	return nil
 }
 
 func runInstall(image, installType, cloudConfig, device string, force, reboot bool) error {
diff --git a/cmd/control/os.go b/cmd/control/os.go
index 490e64bc..c492a784 100644
--- a/cmd/control/os.go
+++ b/cmd/control/os.go
@@ -9,13 +9,15 @@ import (
 	"os"
"strings" + "golang.org/x/net/context" + log "github.com/Sirupsen/logrus" yaml "github.com/cloudfoundry-incubator/candiedyaml" - dockerClient "github.com/fsouza/go-dockerclient" - "github.com/codegangsta/cli" - "github.com/docker/libcompose/project" + dockerClient "github.com/docker/engine-api/client" + composeConfig "github.com/docker/libcompose/config" + "github.com/docker/libcompose/project/options" "github.com/rancher/os/cmd/power" "github.com/rancher/os/compose" "github.com/rancher/os/config" @@ -115,7 +117,7 @@ func getImages() (*Images, error) { return parseBody(body) } -func osMetaDataGet(c *cli.Context) { +func osMetaDataGet(c *cli.Context) error { images, err := getImages() if err != nil { log.Fatal(err) @@ -127,13 +129,15 @@ func osMetaDataGet(c *cli.Context) { } for _, image := range images.Available { - _, err := client.InspectImage(image) - if err == dockerClient.ErrNoSuchImage { + _, _, err := client.ImageInspectWithRaw(context.Background(), image, false) + if dockerClient.IsErrImageNotFound(err) { fmt.Println(image, "remote") } else { fmt.Println(image, "local") } } + + return nil } func getLatestImage() (string, error) { @@ -145,7 +149,7 @@ func getLatestImage() (string, error) { return images.Current, nil } -func osUpgrade(c *cli.Context) { +func osUpgrade(c *cli.Context) error { image := c.String("image") if image == "" { @@ -164,20 +168,13 @@ func osUpgrade(c *cli.Context) { if err := startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot"), c.Bool("kexec"), c.Bool("upgrade-console"), c.String("append")); err != nil { log.Fatal(err) } + + return nil } -func osVersion(c *cli.Context) { +func osVersion(c *cli.Context) error { fmt.Println(config.VERSION) -} - -func yes(in *bufio.Reader, question string) bool { - fmt.Printf("%s [y/N]: ", question) - line, err := in.ReadString('\n') - if err != nil { - log.Fatal(err) - } - - return strings.ToLower(line[0:1]) == "y" + return nil } func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgradeConsole bool, kernelArgs string) error { @@ -198,27 +195,31 @@ func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgra } if upgradeConsole { - cfg, err := config.LoadConfig() - if err != nil { - log.Fatal(err) - } - - cfg.Rancher.ForceConsoleRebuild = true - if err := cfg.Save(); err != nil { + if err := config.Set("rancher.force_console_rebuild", true); err != nil { log.Fatal(err) } } - container, err := compose.CreateService(nil, "os-upgrade", &project.ServiceConfig{ + fmt.Printf("Upgrading to %s\n", image) + confirmation := "Continue" + imageSplit := strings.Split(image, ":") + if len(imageSplit) > 1 && imageSplit[1] == config.VERSION+config.SUFFIX { + confirmation = fmt.Sprintf("Already at version %s. 
Continue anyway", imageSplit[1]) + } + if !force && !yes(in, confirmation) { + os.Exit(1) + } + + container, err := compose.CreateService(nil, "os-upgrade", &composeConfig.ServiceConfigV1{ LogDriver: "json-file", Privileged: true, Net: "host", Pid: "host", Image: image, - Labels: project.NewSliceorMap(map[string]string{ + Labels: map[string]string{ config.SCOPE: config.SYSTEM, - }), - Command: project.NewCommand(command...), + }, + Command: command, }) if err != nil { return err @@ -230,41 +231,30 @@ func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgra } // Only pull image if not found locally - if _, err := client.InspectImage(image); err != nil { - if err := container.Pull(); err != nil { + if _, _, err := client.ImageInspectWithRaw(context.Background(), image, false); err != nil { + if err := container.Pull(context.Background()); err != nil { return err } } if !stage { - imageSplit := strings.Split(image, ":") - if len(imageSplit) > 1 && imageSplit[1] == config.VERSION { - if !force && !yes(in, fmt.Sprintf("Already at version %s. Continue anyways", imageSplit[1])) { - os.Exit(1) - } - } else { - fmt.Printf("Upgrading to %s\n", image) - - if !force && !yes(in, "Continue") { - os.Exit(1) - } - } - // If there is already an upgrade container, delete it // Up() should to this, but currently does not due to a bug - if err := container.Delete(); err != nil { + if err := container.Delete(context.Background(), options.Delete{}); err != nil { return err } - if err := container.Up(); err != nil { + if err := container.Up(context.Background(), options.Up{ + Log: true, + }); err != nil { return err } - if err := container.Log(); err != nil { + if err := container.Log(context.Background(), true); err != nil { return err } - if err := container.Delete(); err != nil { + if err := container.Delete(context.Background(), options.Delete{}); err != nil { return err } @@ -288,10 +278,6 @@ func parseBody(body []byte) (*Images, error) { } func getUpgradeUrl() (string, error) { - cfg, err := config.LoadConfig() - if err != nil { - return "", err - } - + cfg := config.LoadConfig() return cfg.Rancher.Upgrade.Url, nil } diff --git a/cmd/control/selinux.go b/cmd/control/selinux.go index e6afc8c6..e80ef61c 100644 --- a/cmd/control/selinux.go +++ b/cmd/control/selinux.go @@ -2,16 +2,17 @@ package control import ( "fmt" + "syscall" + "github.com/codegangsta/cli" "github.com/rancher/os/config" - "syscall" ) func selinuxCommand() cli.Command { app := cli.Command{} app.Name = "selinux" app.Usage = "Launch SELinux tools container." 
-	app.Action = func(c *cli.Context) {
+	app.Action = func(c *cli.Context) error {
 		argv := []string{"system-docker", "run", "-it", "--privileged", "--rm",
 			"--net", "host", "--pid", "host", "--ipc", "host",
 			"-v", "/usr/bin/docker:/usr/bin/docker.dist:ro",
@@ -49,8 +50,9 @@ func selinuxCommand() cli.Command {
 			"-v", "/etc/selinux:/etc/selinux",
 			"-v", "/var/lib/selinux:/var/lib/selinux",
 			"-v", "/usr/share/selinux:/usr/share/selinux",
-			fmt.Sprintf("rancher/os-selinuxtools:%s", config.VERSION + config.SUFFIX), "bash"}
+			fmt.Sprintf("rancher/os-selinuxtools:%s", config.VERSION+config.SUFFIX), "bash"}
 		syscall.Exec("/bin/system-docker", argv, []string{})
+		return nil
 	}
 
 	return app
diff --git a/cmd/control/service.go b/cmd/control/service.go
index 3a135f2d..b681804c 100644
--- a/cmd/control/service.go
+++ b/cmd/control/service.go
@@ -17,13 +17,9 @@ import (
 type projectFactory struct {
 }
 
-func (p *projectFactory) Create(c *cli.Context) (*project.Project, error) {
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		return nil, err
-	}
-
-	return compose.GetProject(cfg, true)
+func (p *projectFactory) Create(c *cli.Context) (project.APIProject, error) {
+	cfg := config.LoadConfig()
+	return compose.GetProject(cfg, true, false)
 }
 
 func beforeApp(c *cli.Context) error {
@@ -86,12 +82,13 @@ func serviceSubCommands() []cli.Command {
 	}
 }
 
-func disable(c *cli.Context) {
+func updateIncludedServices(cfg *config.CloudConfig) error {
+	return config.Set("rancher.services_include", cfg.Rancher.ServicesInclude)
+}
+
+func disable(c *cli.Context) error {
 	changed := false
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		logrus.Fatal(err)
-	}
+	cfg := config.LoadConfig()
 
 	for _, service := range c.Args() {
 		if _, ok := cfg.Rancher.ServicesInclude[service]; !ok {
@@ -103,18 +100,17 @@ func disable(c *cli.Context) {
 	}
 
 	if changed {
-		if err = cfg.Save(); err != nil {
+		if err := updateIncludedServices(cfg); err != nil {
 			logrus.Fatal(err)
 		}
 	}
+
+	return nil
 }
 
-func del(c *cli.Context) {
+func del(c *cli.Context) error {
 	changed := false
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		logrus.Fatal(err)
-	}
+	cfg := config.LoadConfig()
 
 	for _, service := range c.Args() {
 		if _, ok := cfg.Rancher.ServicesInclude[service]; !ok {
@@ -125,17 +121,16 @@ func del(c *cli.Context) {
 	}
 
 	if changed {
-		if err = cfg.Save(); err != nil {
+		if err := updateIncludedServices(cfg); err != nil {
 			logrus.Fatal(err)
 		}
 	}
+
+	return nil
 }
 
-func enable(c *cli.Context) {
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		logrus.Fatal(err)
-	}
+func enable(c *cli.Context) error {
+	cfg := config.LoadConfig()
 
 	var enabledServices []string
 
@@ -155,17 +150,16 @@ func enable(c *cli.Context) {
 			logrus.Fatal(err)
 		}
 
-		if err := cfg.Save(); err != nil {
+		if err := updateIncludedServices(cfg); err != nil {
 			logrus.Fatal(err)
 		}
 	}
+
+	return nil
 }
 
-func list(c *cli.Context) {
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		logrus.Fatal(err)
-	}
+func list(c *cli.Context) error {
+	cfg := config.LoadConfig()
 
 	clone := make(map[string]bool)
 	for service, enabled := range cfg.Rancher.ServicesInclude {
@@ -197,4 +191,6 @@ func list(c *cli.Context) {
 			fmt.Printf("disabled %s\n", service)
 		}
 	}
+
+	return nil
 }
diff --git a/cmd/control/tlsconf.go b/cmd/control/tlsconf.go
index 33b2d779..f6c25791 100644
--- a/cmd/control/tlsconf.go
+++ b/cmd/control/tlsconf.go
@@ -10,11 +10,20 @@ import (
 	"github.com/codegangsta/cli"
 	machineUtil "github.com/docker/machine/utils"
 	"github.com/rancher/os/config"
+	"github.com/rancher/os/util"
 )
 
 const (
-	NAME string = "rancher"
-	BITS int    = 2048
+	NAME          string = "rancher"
+	BITS          int    = 2048
+	ServerTlsPath string = "/etc/docker/tls"
+	ClientTlsPath string = "/home/rancher/.docker"
+	Cert          string = "cert.pem"
+	Key           string = "key.pem"
+	ServerCert    string = "server-cert.pem"
+	ServerKey     string = "server-key.pem"
+	CaCert        string = "ca.pem"
+	CaKey         string = "ca-key.pem"
 )
 
 func tlsConfCommands() []cli.Command {
@@ -44,101 +53,81 @@ func tlsConfCommands() []cli.Command {
 	}
 }
 
-func writeCerts(generateServer bool, hostname []string, cfg *config.CloudConfig, certPath, keyPath, caCertPath, caKeyPath string) error {
+func writeCerts(generateServer bool, hostname []string, certPath, keyPath, caCertPath, caKeyPath string) error {
 	if !generateServer {
 		return machineUtil.GenerateCert([]string{""}, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS)
 	}
 
-	if cfg.Rancher.Docker.ServerKey == "" || cfg.Rancher.Docker.ServerCert == "" {
-		err := machineUtil.GenerateCert(hostname, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS)
-		if err != nil {
-			return err
-		}
-
-		cert, err := ioutil.ReadFile(certPath)
-		if err != nil {
-			return err
-		}
-
-		key, err := ioutil.ReadFile(keyPath)
-		if err != nil {
-			return err
-		}
-
-		cfg, err = cfg.Merge(map[interface{}]interface{}{
-			"rancher": map[interface{}]interface{}{
-				"docker": map[interface{}]interface{}{
-					"server_cert": string(cert),
-					"server_key":  string(key),
-				},
-			},
-		})
-		if err != nil {
-			return err
-		}
-
-		return cfg.Save() // certPath, keyPath are already written to by machineUtil.GenerateCert()
-	}
-
-	if err := ioutil.WriteFile(certPath, []byte(cfg.Rancher.Docker.ServerCert), 0400); err != nil {
+	if err := machineUtil.GenerateCert(hostname, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS); err != nil {
 		return err
 	}
 
-	return ioutil.WriteFile(keyPath, []byte(cfg.Rancher.Docker.ServerKey), 0400)
+	cert, err := ioutil.ReadFile(certPath)
+	if err != nil {
+		return err
+	}
+	key, err := ioutil.ReadFile(keyPath)
+	if err != nil {
+		return err
+	}
+
+	// certPath, keyPath are already written to by machineUtil.GenerateCert()
+	if err := config.Set("rancher.docker.server_cert", string(cert)); err != nil {
+		return err
+	}
+	if err := config.Set("rancher.docker.server_key", string(key)); err != nil {
+		return err
+	}
+
+	return nil
 }
 
-func writeCaCerts(cfg *config.CloudConfig, caCertPath, caKeyPath string) (*config.CloudConfig, error) {
+func writeCaCerts(cfg *config.CloudConfig, caCertPath, caKeyPath string) error {
 	if cfg.Rancher.Docker.CACert == "" {
 		if err := machineUtil.GenerateCACertificate(caCertPath, caKeyPath, NAME, BITS); err != nil {
-			return nil, err
+			return err
 		}
 
 		caCert, err := ioutil.ReadFile(caCertPath)
 		if err != nil {
-			return nil, err
+			return err
 		}
 
 		caKey, err := ioutil.ReadFile(caKeyPath)
 		if err != nil {
-			return nil, err
+			return err
 		}
 
-		cfg, err = cfg.Merge(map[interface{}]interface{}{
-			"rancher": map[interface{}]interface{}{
-				"docker": map[interface{}]interface{}{
-					"ca_key":  string(caKey),
-					"ca_cert": string(caCert),
-				},
-			},
-		})
-		if err != nil {
-			return nil, err
+		// caCertPath, caKeyPath are already written to by machineUtil.GenerateCACertificate()
+		if err := config.Set("rancher.docker.ca_cert", string(caCert)); err != nil {
+			return err
+		}
+		if err := config.Set("rancher.docker.ca_key", string(caKey)); err != nil {
+			return err
+		}
+	} else {
+		cfg = config.LoadConfig()
+
+		if err := util.WriteFileAtomic(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
+			return err
 		}
 
-		if err = cfg.Save(); err != nil {
-			return nil, err
+		if err := util.WriteFileAtomic(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
+			return err
 		}
-
-		return cfg, nil // caCertPath, caKeyPath are already written to by machineUtil.GenerateCACertificate()
 	}
 
-	if err := ioutil.WriteFile(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
-		return nil, err
-	}
-
-	if err := ioutil.WriteFile(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
-		return nil, err
-	}
-
-	return cfg, nil
+	return nil
 }
 
-func tlsConfCreate(c *cli.Context) {
+func tlsConfCreate(c *cli.Context) error {
 	err := generate(c)
 	if err != nil {
 		log.Fatal(err)
 	}
+
+	return nil
 }
 
 func generate(c *cli.Context) error {
@@ -150,27 +139,22 @@ func generate(c *cli.Context) error {
 }
 
 func Generate(generateServer bool, outDir string, hostnames []string) error {
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		return err
-	}
-
 	if outDir == "" {
 		if generateServer {
-			outDir = "/etc/docker/tls"
+			outDir = ServerTlsPath
 		} else {
-			outDir = "/home/rancher/.docker"
+			outDir = ClientTlsPath
 		}
 		log.Infof("Out directory (-d, --dir) not specified, using default: %s", outDir)
 	}
 
-	caCertPath := filepath.Join(outDir, "ca.pem")
-	caKeyPath := filepath.Join(outDir, "ca-key.pem")
-	certPath := filepath.Join(outDir, "cert.pem")
-	keyPath := filepath.Join(outDir, "key.pem")
+	caCertPath := filepath.Join(outDir, CaCert)
+	caKeyPath := filepath.Join(outDir, CaKey)
+	certPath := filepath.Join(outDir, Cert)
+	keyPath := filepath.Join(outDir, Key)
 
 	if generateServer {
-		certPath = filepath.Join(outDir, "server-cert.pem")
-		keyPath = filepath.Join(outDir, "server-key.pem")
+		certPath = filepath.Join(outDir, ServerCert)
+		keyPath = filepath.Join(outDir, ServerKey)
 	}
 
 	if _, err := os.Stat(outDir); os.IsNotExist(err) {
@@ -179,11 +163,11 @@ func Generate(generateServer bool, outDir string, hostnames []string) error {
 		}
 	}
 
-	cfg, err = writeCaCerts(cfg, caCertPath, caKeyPath)
-	if err != nil {
+	cfg := config.LoadConfig()
+	if err := writeCaCerts(cfg, caCertPath, caKeyPath); err != nil {
 		return err
 	}
 
-	if err := writeCerts(generateServer, hostnames, cfg, certPath, keyPath, caCertPath, caKeyPath); err != nil {
+	if err := writeCerts(generateServer, hostnames, certPath, keyPath, caCertPath, caKeyPath); err != nil {
 		return err
 	}
 
diff --git a/cmd/control/util.go b/cmd/control/util.go
new file mode 100644
index 00000000..6ab95b67
--- /dev/null
+++ b/cmd/control/util.go
@@ -0,0 +1,19 @@
+package control
+
+import (
+	"bufio"
+	"fmt"
+	"strings"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+func yes(in *bufio.Reader, question string) bool {
+	fmt.Printf("%s [y/N]: ", question)
+	line, err := in.ReadString('\n')
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return strings.ToLower(line[0:1]) == "y"
+}
diff --git a/cmd/network/network.go b/cmd/network/network.go
index ca4514bf..87a8d3d6 100644
--- a/cmd/network/network.go
+++ b/cmd/network/network.go
@@ -3,55 +3,58 @@ package network
 import (
 	"flag"
 	"os"
-	"os/exec"
+
+	"golang.org/x/net/context"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/libnetwork/resolvconf"
 	"github.com/rancher/netconf"
 	"github.com/rancher/os/config"
+	"github.com/rancher/os/docker"
 	"github.com/rancher/os/hostname"
 )
 
-const (
-	NETWORK_DONE     = "/var/run/network.done"
-	WAIT_FOR_NETWORK = "wait-for-network"
-)
-
 var (
-	daemon bool
-	flags  *flag.FlagSet
+	stopNetworkPre bool
+	flags          *flag.FlagSet
 )
 
 func init() {
 	flags = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
-	flags.BoolVar(&daemon, "daemon", false, "run dhcpd as daemon")
-}
-
-func sendTerm(proc string) {
-	cmd := exec.Command("killall", "-TERM", proc)
-	cmd.Stderr = os.Stderr
-	cmd.Stdout = os.Stdout
-	cmd.Run()
+	flags.BoolVar(&stopNetworkPre, "stop-network-pre", false, "")
 }
 
 func Main() {
 	flags.Parse(os.Args[1:])
 
-	log.Infof("Running network: daemon=%v", daemon)
+	log.Infof("Running network: stop-network-pre=%v", stopNetworkPre)
 
-	os.Remove(NETWORK_DONE) // ignore error
-
-	cfg, err := config.LoadConfig()
-	if err != nil {
-		log.Fatal(err)
+	if stopNetworkPre {
+		client, err := docker.NewSystemClient()
+		if err != nil {
+			log.Error(err)
+		}
+
+		err = client.ContainerStop(context.Background(), "network-pre", 10)
+		if err != nil {
+			log.Error(err)
+		}
+
+		_, err = client.ContainerWait(context.Background(), "network-pre")
+		if err != nil {
+			log.Error(err)
+		}
 	}
 
+	cfg := config.LoadConfig()
+
 	nameservers := cfg.Rancher.Network.Dns.Nameservers
 	search := cfg.Rancher.Network.Dns.Search
 	userSetDns := len(nameservers) > 0 || len(search) > 0
 	if !userSetDns {
-		nameservers = cfg.Rancher.DefaultNetwork.Dns.Nameservers
-		search = cfg.Rancher.DefaultNetwork.Dns.Search
+		nameservers = cfg.Rancher.Defaults.Network.Dns.Nameservers
+		search = cfg.Rancher.Defaults.Network.Dns.Search
 	}
 
 	if _, err := resolvconf.Build("/etc/resolv.conf", nameservers, search, nil); err != nil {
@@ -75,14 +78,5 @@ func Main() {
 		log.Error(err)
 	}
 
-	if f, err := os.Create(NETWORK_DONE); err != nil {
-		log.Error(err)
-	} else {
-		f.Close()
-	}
-
-	sendTerm(WAIT_FOR_NETWORK)
-
-	if daemon {
-		select {}
-	}
+	select {}
 }
diff --git a/cmd/power/power.go b/cmd/power/power.go
index 4d1c550c..632e8aed 100644
--- a/cmd/power/power.go
+++ b/cmd/power/power.go
@@ -1,7 +1,6 @@
 package power
 
 import (
-	"bufio"
 	"errors"
 	"os"
 	"path/filepath"
@@ -9,14 +8,15 @@ import (
 	"strings"
 	"syscall"
 
+	"golang.org/x/net/context"
+
 	log "github.com/Sirupsen/logrus"
-	dockerClient "github.com/fsouza/go-dockerclient"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/filters"
 
 	"github.com/rancher/os/docker"
-)
-
-const (
-	DOCKER_CGROUPS_FILE = "/proc/self/cgroup"
+	"github.com/rancher/os/util"
 )
 
 func runDocker(name string) error {
@@ -36,11 +36,10 @@ func runDocker(name string) error {
 		cmd = os.Args
 	}
 
-	exiting, err := client.InspectContainer(name)
-	if exiting != nil {
-		err := client.RemoveContainer(dockerClient.RemoveContainerOptions{
-			ID:    exiting.ID,
-			Force: true,
+	existing, err := client.ContainerInspect(context.Background(), name)
+	if err == nil && existing.ID != "" {
+		err := client.ContainerRemove(context.Background(), types.ContainerRemoveOptions{
+			ContainerID: existing.ID,
 		})
 
 		if err != nil {
@@ -48,53 +47,50 @@ func runDocker(name string) error {
 		}
 	}
 
-	currentContainerId, err := getCurrentContainerId()
+	currentContainerId, err := util.GetCurrentContainerId()
 	if err != nil {
 		return err
 	}
 
-	currentContainer, err := client.InspectContainer(currentContainerId)
+	currentContainer, err := client.ContainerInspect(context.Background(), currentContainerId)
 	if err != nil {
 		return err
 	}
 
-	powerContainer, err := client.CreateContainer(dockerClient.CreateContainerOptions{
-		Name: name,
-		Config: &dockerClient.Config{
+	powerContainer, err := client.ContainerCreate(context.Background(),
+		&container.Config{
 			Image: currentContainer.Config.Image,
 			Cmd:   cmd,
 			Env: []string{
 				"IN_DOCKER=true",
 			},
 		},
-		HostConfig: &dockerClient.HostConfig{
+		&container.HostConfig{
 			PidMode: "host",
 			VolumesFrom: []string{
 				currentContainer.ID,
 			},
 			Privileged: true,
-		},
-	})
+		}, nil, name)
 	if err != nil {
 		return err
 	}
 
 	go func() {
-		client.AttachToContainer(dockerClient.AttachToContainerOptions{
-			Container:    powerContainer.ID,
-			OutputStream: os.Stdout,
-			ErrorStream:  os.Stderr,
-			Stderr:       true,
-			Stdout:       true,
+		client.ContainerAttach(context.Background(), types.ContainerAttachOptions{
+			ContainerID: powerContainer.ID,
+			Stream:      true,
+			Stderr:      true,
+			Stdout:      true,
 		})
 	}()
 
-	err = client.StartContainer(powerContainer.ID, powerContainer.HostConfig)
+	err = client.ContainerStart(context.Background(), powerContainer.ID)
 	if err != nil {
 		return err
 	}
 
-	_, err = client.WaitContainer(powerContainer.ID)
+	_, err = client.ContainerWait(context.Background(), powerContainer.ID)
 
 	if err != nil {
 		log.Fatal(err)
@@ -172,19 +168,20 @@ func shutDownContainers() error {
 		return err
 	}
 
-	opts := dockerClient.ListContainersOptions{
-		All: true,
-		Filters: map[string][]string{
-			"status": {"running"},
-		},
+	filter := filters.NewArgs()
+	filter.Add("status", "running")
+
+	opts := types.ContainerListOptions{
+		All:    true,
+		Filter: filter,
 	}
 
-	containers, err := client.ListContainers(opts)
+	containers, err := client.ContainerList(context.Background(), opts)
 	if err != nil {
 		return err
 	}
 
-	currentContainerId, err := getCurrentContainerId()
+	currentContainerId, err := util.GetCurrentContainerId()
 	if err != nil {
 		return err
 	}
@@ -197,7 +194,7 @@ func shutDownContainers() error {
 		}
 
 		log.Infof("Stopping %s : %v", container.ID[:12], container.Names)
-		stopErr := client.StopContainer(container.ID, uint(timeout))
+		stopErr := client.ContainerStop(context.Background(), container.ID, timeout)
 		if stopErr != nil {
 			stopErrorStrings = append(stopErrorStrings, " ["+container.ID+"] "+stopErr.Error())
 		}
@@ -209,7 +206,7 @@ func shutDownContainers() error {
 		if container.ID == currentContainerId {
 			continue
 		}
-		_, waitErr := client.WaitContainer(container.ID)
+		_, waitErr := client.ContainerWait(context.Background(), container.ID)
 		if waitErr != nil {
 			waitErrorStrings = append(waitErrorStrings, " ["+container.ID+"] "+waitErr.Error())
 		}
@@ -221,35 +218,3 @@ func shutDownContainers() error {
 
 	return nil
 }
-
-func getCurrentContainerId() (string, error) {
-	file, err := os.Open(DOCKER_CGROUPS_FILE)
-
-	if err != nil {
-		return "", err
-	}
-
-	fileReader := bufio.NewScanner(file)
-	if !fileReader.Scan() {
-		return "", errors.New("Empty file /proc/self/cgroup")
-	}
-	line := fileReader.Text()
-	parts := strings.Split(line, "/")
-
-	for len(parts) != 3 {
-		if !fileReader.Scan() {
-			return "", errors.New("Found no docker cgroups")
-		}
-		line = fileReader.Text()
-		parts = strings.Split(line, "/")
-		if len(parts) == 3 {
-			if strings.HasSuffix(parts[1], "docker") {
-				break
-			} else {
-				parts = nil
-			}
-		}
-	}
-
-	return parts[len(parts)-1:][0], nil
-}
diff --git a/cmd/power/shutdown.go b/cmd/power/shutdown.go
index cc131e60..4993afb3 100644
--- a/cmd/power/shutdown.go
+++ b/cmd/power/shutdown.go
@@ -31,7 +31,7 @@ func Main() {
 	app.Run(os.Args)
 }
 
-func shutdown(c *cli.Context) {
+func shutdown(c *cli.Context) error {
 	common("")
 	reboot := c.String("r")
 	poweroff := c.String("h")
@@ -41,4 +41,6 @@ func shutdown(c *cli.Context) {
 	} else if poweroff == "now" {
 		PowerOff()
 	}
+
+	return nil
 }
diff --git a/cmd/respawn/respawn.go b/cmd/respawn/respawn.go
index 1cb34012..89149c0a 100644
--- a/cmd/respawn/respawn.go
+++ b/cmd/respawn/respawn.go
@@ -48,7 +48,7 @@ func setupSigterm() {
 	}()
 }
 
-func run(c *cli.Context) {
+func run(c *cli.Context) error {
 	setupSigterm()
 
 	var stream io.Reader = os.Stdin
@@ -79,6 +79,7 @@ func run(c *cli.Context) {
 	}
wg.Wait() + return nil } func addProcess(process *os.Process) { diff --git a/cmd/switchconsole/switch_console.go b/cmd/switchconsole/switch_console.go new file mode 100644 index 00000000..fef54ea6 --- /dev/null +++ b/cmd/switchconsole/switch_console.go @@ -0,0 +1,45 @@ +package switchconsole + +import ( + "os" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/project/options" + "github.com/rancher/os/compose" + "github.com/rancher/os/config" + "golang.org/x/net/context" +) + +func Main() { + if len(os.Args) != 2 { + log.Fatal("Must specify exactly one existing container") + } + newConsole := os.Args[1] + + cfg := config.LoadConfig() + + project, err := compose.GetProject(cfg, true, false) + if err != nil { + log.Fatal(err) + } + + if newConsole != "default" { + if err = compose.LoadService(project, cfg, true, newConsole); err != nil { + log.Fatal(err) + } + } + + if err = project.Up(context.Background(), options.Up{ + Log: true, + }, "console"); err != nil { + log.Fatal(err) + } + + if err = project.Restart(context.Background(), 10, "docker"); err != nil { + log.Errorf("Failed to restart Docker: %v", err) + } + + if err = config.Set("rancher.console", newConsole); err != nil { + log.Errorf("Failed to update 'rancher.console': %v", err) + } +} diff --git a/cmd/systemdocker/system-docker.go b/cmd/systemdocker/system-docker.go index f925ecca..40683457 100644 --- a/cmd/systemdocker/system-docker.go +++ b/cmd/systemdocker/system-docker.go @@ -1,30 +1,21 @@ package systemdocker import ( + "log" "os" - "strings" - "syscall" - log "github.com/Sirupsen/logrus" + "github.com/docker/docker/docker" "github.com/rancher/os/config" ) func Main() { - var newEnv []string - for _, env := range os.Environ() { - if !strings.HasPrefix(env, "DOCKER_HOST=") { - newEnv = append(newEnv, env) - } - } - - newEnv = append(newEnv, "DOCKER_HOST="+config.DOCKER_SYSTEM_HOST) - if os.Geteuid() != 0 { log.Fatalf("%s: Need to be root", os.Args[0]) } - os.Args[0] = config.DOCKER_DIST_BIN - if err := syscall.Exec(os.Args[0], os.Args, newEnv); err != nil { - log.Fatal(err) + if os.Getenv("DOCKER_HOST") == "" { + os.Setenv("DOCKER_HOST", config.DOCKER_SYSTEM_HOST) } + + docker.Main() } diff --git a/cmd/userdocker/main.go b/cmd/userdocker/main.go index 194a5038..08b2b097 100644 --- a/cmd/userdocker/main.go +++ b/cmd/userdocker/main.go @@ -1,153 +1,211 @@ package userdocker import ( - "bufio" - "encoding/json" - "fmt" + "io" + "io/ioutil" "os" - "os/exec" "os/signal" - "strings" + "strconv" "syscall" "time" + "golang.org/x/net/context" + + "path/filepath" + log "github.com/Sirupsen/logrus" - "github.com/docker/libcompose/docker" + "github.com/docker/engine-api/types" + composeClient "github.com/docker/libcompose/docker/client" "github.com/docker/libcompose/project" "github.com/rancher/os/cmd/control" "github.com/rancher/os/compose" "github.com/rancher/os/config" - - "github.com/opencontainers/runc/libcontainer/cgroups" - _ "github.com/opencontainers/runc/libcontainer/nsenter" - "github.com/opencontainers/runc/libcontainer/system" + rosDocker "github.com/rancher/os/docker" + "github.com/rancher/os/util" ) const ( DEFAULT_STORAGE_CONTEXT = "console" + DOCKER_PID_FILE = "/var/run/docker.pid" + DOCKER_COMMAND = "docker-init" userDocker = "user-docker" ) func Main() { - cfg, err := config.LoadConfig() + cfg := config.LoadConfig() + + execID, resp, err := startDocker(cfg) if err != nil { log.Fatal(err) } - if len(os.Args) == 1 { - if err := enter(cfg); err != nil { - log.Fatal(err) + process, err := 
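The new switchconsole command loads the requested console service into the core project, brings up only the console service, restarts docker so it reattaches to the new console, and persists the choice under rancher.console. A sketch of the same up-then-restart flow for an arbitrary service (illustrative, not part of the patch; it assumes the compose and config helpers from this diff and the libcompose v2 signatures shown above):

package sketch

import (
  "golang.org/x/net/context"

  log "github.com/Sirupsen/logrus"
  "github.com/docker/libcompose/project/options"
  "github.com/rancher/os/compose"
  "github.com/rancher/os/config"
)

// switchService brings up one service and restarts "docker", mirroring
// switch_console.go's Main.
func switchService(name string) {
  cfg := config.LoadConfig()
  p, err := compose.GetProject(cfg, true, false)
  if err != nil {
    log.Fatal(err)
  }
  // Up only the named service; options.Up{Log: true} streams its logs.
  if err = p.Up(context.Background(), options.Up{Log: true}, name); err != nil {
    log.Fatal(err)
  }
  // Allow 10 seconds for a clean stop before the restart forces it.
  if err = p.Restart(context.Background(), 10, "docker"); err != nil {
    log.Errorf("Failed to restart Docker: %v", err)
  }
}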
diff --git a/cmd/systemdocker/system-docker.go b/cmd/systemdocker/system-docker.go
index f925ecca..40683457 100644
--- a/cmd/systemdocker/system-docker.go
+++ b/cmd/systemdocker/system-docker.go
@@ -1,30 +1,21 @@
 package systemdocker
 
 import (
+  "log"
   "os"
-  "strings"
-  "syscall"
 
-  log "github.com/Sirupsen/logrus"
+  "github.com/docker/docker/docker"
   "github.com/rancher/os/config"
 )
 
 func Main() {
-  var newEnv []string
-  for _, env := range os.Environ() {
-    if !strings.HasPrefix(env, "DOCKER_HOST=") {
-      newEnv = append(newEnv, env)
-    }
-  }
-
-  newEnv = append(newEnv, "DOCKER_HOST="+config.DOCKER_SYSTEM_HOST)
-
   if os.Geteuid() != 0 {
     log.Fatalf("%s: Need to be root", os.Args[0])
   }
 
-  os.Args[0] = config.DOCKER_DIST_BIN
-  if err := syscall.Exec(os.Args[0], os.Args, newEnv); err != nil {
-    log.Fatal(err)
+  if os.Getenv("DOCKER_HOST") == "" {
+    os.Setenv("DOCKER_HOST", config.DOCKER_SYSTEM_HOST)
   }
+
+  docker.Main()
 }
diff --git a/cmd/userdocker/main.go b/cmd/userdocker/main.go
index 194a5038..08b2b097 100644
--- a/cmd/userdocker/main.go
+++ b/cmd/userdocker/main.go
@@ -1,153 +1,211 @@
 package userdocker
 
 import (
-  "bufio"
-  "encoding/json"
-  "fmt"
+  "io"
+  "io/ioutil"
   "os"
-  "os/exec"
   "os/signal"
-  "strings"
+  "strconv"
   "syscall"
   "time"
 
+  "golang.org/x/net/context"
+
+  "path/filepath"
+
   log "github.com/Sirupsen/logrus"
-  "github.com/docker/libcompose/docker"
+  "github.com/docker/engine-api/types"
+  composeClient "github.com/docker/libcompose/docker/client"
   "github.com/docker/libcompose/project"
   "github.com/rancher/os/cmd/control"
   "github.com/rancher/os/compose"
   "github.com/rancher/os/config"
-
-  "github.com/opencontainers/runc/libcontainer/cgroups"
-  _ "github.com/opencontainers/runc/libcontainer/nsenter"
-  "github.com/opencontainers/runc/libcontainer/system"
+  rosDocker "github.com/rancher/os/docker"
+  "github.com/rancher/os/util"
 )
 
 const (
   DEFAULT_STORAGE_CONTEXT = "console"
+  DOCKER_PID_FILE = "/var/run/docker.pid"
+  DOCKER_COMMAND = "docker-init"
   userDocker = "user-docker"
 )
 
 func Main() {
-  cfg, err := config.LoadConfig()
+  cfg := config.LoadConfig()
+
+  execID, resp, err := startDocker(cfg)
   if err != nil {
     log.Fatal(err)
   }
 
-  if len(os.Args) == 1 {
-    if err := enter(cfg); err != nil {
-      log.Fatal(err)
+  process, err := getDockerProcess()
+  if err != nil {
+    log.Fatal(err)
+  }
+
+  handleTerm(process)
+
+  // Wait for Docker daemon to exit
+  io.Copy(ioutil.Discard, resp.Reader)
+  resp.Close()
+
+  client, err := rosDocker.NewSystemClient()
+  if err != nil {
+    log.Fatal(err)
+  }
+
+  state, err := client.ContainerExecInspect(context.Background(), execID)
+  if err != nil {
+    log.Fatal(err)
+  }
+
+  // Proxy exit code
+  os.Exit(state.ExitCode)
+}
+
+func writeCerts(cfg *config.CloudConfig) error {
+  outDir := control.ServerTlsPath
+  if err := os.MkdirAll(outDir, 0700); err != nil {
+    return err
+  }
+  caCertPath := filepath.Join(outDir, control.CaCert)
+  caKeyPath := filepath.Join(outDir, control.CaKey)
+  serverCertPath := filepath.Join(outDir, control.ServerCert)
+  serverKeyPath := filepath.Join(outDir, control.ServerKey)
+  if cfg.Rancher.Docker.CACert != "" {
+    if err := util.WriteFileAtomic(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
+      return err
     }
-  } else {
-    if err := main(cfg); err != nil {
-      log.Fatal(err)
+
+    if err := util.WriteFileAtomic(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
+      return err
     }
   }
-}
-
-func enter(cfg *config.CloudConfig) error {
-  context := cfg.Rancher.Docker.StorageContext
-  if context == "" {
-    context = DEFAULT_STORAGE_CONTEXT
-  }
-
-  log.Infof("Starting Docker in context: %s", context)
-
-  p, err := compose.GetProject(cfg, true)
-  if err != nil {
-    return err
-  }
-
-  pid, err := waitForPid(context, p)
-  if err != nil {
-    return err
-  }
-
-  log.Infof("%s PID %d", context, pid)
-
-  return runNsenter(pid)
-}
-
-type result struct {
-  Pid int `json:"Pid"`
-}
-
-func findProgram(searchPaths ...string) string {
-  prog := ""
-
-  for _, i := range searchPaths {
-    var err error
-    prog, err = exec.LookPath(i)
-    if err == nil {
-      break
+  if cfg.Rancher.Docker.ServerCert != "" {
+    if err := util.WriteFileAtomic(serverCertPath, []byte(cfg.Rancher.Docker.ServerCert), 0400); err != nil {
+      return err
     }
-    prog = i
-  }
-  return prog
+    if err := util.WriteFileAtomic(serverKeyPath, []byte(cfg.Rancher.Docker.ServerKey), 0400); err != nil {
+      return err
+    }
+  }
+  return nil
 }
 
-func runNsenter(pid int) error {
-  args := []string{findProgram(userDocker), "main"}
+func startDocker(cfg *config.CloudConfig) (string, types.HijackedResponse, error) {
+  storageContext := cfg.Rancher.Docker.StorageContext
+  if storageContext == "" {
+    storageContext = DEFAULT_STORAGE_CONTEXT
+  }
 
-  r, w, err := os.Pipe()
+  log.Infof("Starting Docker in context: %s", storageContext)
+
+  p, err := compose.GetProject(cfg, true, false)
   if err != nil {
-    return err
+    return "", types.HijackedResponse{}, err
   }
 
-  cmd := &exec.Cmd{
-    Path: args[0],
-    Args: args,
-    Stdin: os.Stdin,
-    Stdout: os.Stdout,
-    Stderr: os.Stderr,
-    ExtraFiles: []*os.File{w},
-    Env: append(os.Environ(),
-      "_LIBCONTAINER_INITPIPE=3",
-      fmt.Sprintf("_LIBCONTAINER_INITPID=%d", pid),
-    ),
-  }
-
-  if err := cmd.Start(); err != nil {
-    return err
-  }
-  w.Close()
-
-  var result result
-  if err := json.NewDecoder(r).Decode(&result); err != nil {
-    return err
-  }
-
-  if err := cmd.Wait(); err != nil {
-    return err
-  }
-
-  log.Infof("Docker PID %d", result.Pid)
-
-  p, err := os.FindProcess(result.Pid)
+  pid, err := waitForPid(storageContext, p)
   if err != nil {
-    return err
+    return "", types.HijackedResponse{}, err
   }
 
-  handleTerm(p)
+  log.Infof("%s PID %d", storageContext, pid)
 
-  if err := switchCgroup(result.Pid, pid); err != nil {
-    return err
+  client, err := rosDocker.NewSystemClient()
+  if err != nil {
+    return "", types.HijackedResponse{}, err
   }
 
-  _, err = p.Wait()
-  return err
+  if err := os.Remove(DOCKER_PID_FILE); err != nil && !os.IsNotExist(err) {
+    return "", types.HijackedResponse{}, err
+  }
+
+  dockerCfg := cfg.Rancher.Docker
+
+  args := dockerCfg.FullArgs()
+
+  log.Debugf("User Docker args: %v", args)
+
+  if dockerCfg.TLS {
+    if err := writeCerts(cfg); err != nil {
+      return "", types.HijackedResponse{}, err
+    }
+  }
+
+  cmd := []string{"env"}
+  log.Info(dockerCfg.AppendEnv())
+  cmd = append(cmd, dockerCfg.AppendEnv()...)
+  cmd = append(cmd, DOCKER_COMMAND)
+  cmd = append(cmd, args...)
+  log.Infof("Running %v", cmd)
+
+  resp, err := client.ContainerExecCreate(context.Background(), types.ExecConfig{
+    Container: storageContext,
+    Privileged: true,
+    AttachStderr: true,
+    AttachStdout: true,
+    Cmd: cmd,
+  })
+  if err != nil {
+    return "", types.HijackedResponse{}, err
+  }
+
+  attachResp, err := client.ContainerExecAttach(context.Background(), resp.ID, types.ExecConfig{
+    Detach: false,
+    AttachStderr: true,
+    AttachStdout: true,
+  })
+  if err != nil {
+    return "", types.HijackedResponse{}, err
+  }
+
+  if err := client.ContainerExecStart(context.Background(), resp.ID, types.ExecStartCheck{
+    Detach: false,
+  }); err != nil {
+    return "", types.HijackedResponse{}, err
+  }
+
+  return resp.ID, attachResp, nil
 }
 
-func handleTerm(p *os.Process) {
+func getDockerProcess() (*os.Process, error) {
+  pidBytes, err := waitForFile(DOCKER_PID_FILE)
+  if err != nil {
+    return nil, err
+  }
+  dockerPid, err := strconv.Atoi(string(pidBytes))
+  if err != nil {
+    return nil, err
+  }
+  return os.FindProcess(dockerPid)
+}
+
+func handleTerm(process *os.Process) {
   term := make(chan os.Signal)
   signal.Notify(term, syscall.SIGTERM)
   go func() {
     <-term
-    p.Signal(syscall.SIGTERM)
+    process.Signal(syscall.SIGTERM)
   }()
 }
 
+func waitForFile(file string) ([]byte, error) {
+  for {
+    contents, err := ioutil.ReadFile(file)
+    if os.IsNotExist(err) {
+      log.Infof("Waiting for %s", file)
+      time.Sleep(1 * time.Second)
+    } else if err != nil {
+      return nil, err
+    } else {
+      return contents, nil
+    }
+  }
+}
+
 func waitForPid(service string, project *project.Project) (int, error) {
   log.Infof("Getting PID for service: %s", service)
   for {
@@ -166,7 +224,7 @@ func getPid(service string, project *project.Project) (int, error) {
     return 0, err
   }
 
-  containers, err := s.Containers()
+  containers, err := s.Containers(context.Background())
   if err != nil {
     return 0, err
   }
@@ -175,7 +233,7 @@
     return 0, nil
   }
 
-  client, err := docker.CreateClient(docker.ClientOpts{
+  client, err := composeClient.Create(composeClient.Options{
     Host: config.DOCKER_SYSTEM_HOST,
   })
   if err != nil {
@@ -187,8 +245,8 @@
     return 0, err
   }
 
-  info, err := client.InspectContainer(id)
-  if err != nil || info == nil {
+  info, err := client.ContainerInspect(context.Background(), id)
+  if err != nil || info.ID == "" {
     return 0, err
   }
@@ -198,71 +256,3 @@
 
   return 0, nil
 }
-
-func main(cfg *config.CloudConfig) error {
-  os.Unsetenv("_LIBCONTAINER_INITPIPE")
-  os.Unsetenv("_LIBCONTAINER_INITPID")
-
-  if err := system.ParentDeathSignal(syscall.SIGKILL).Set(); err != nil {
-    return err
-  }
-
-  if err := os.Remove("/var/run/docker.pid"); err != nil && !os.IsNotExist(err) {
-    return err
-  }
-
-  dockerCfg := cfg.Rancher.Docker
-
-  args := dockerCfg.FullArgs()
-
-  log.Debugf("User Docker args: %v", args)
-
-  if dockerCfg.TLS {
-    log.Debug("Generating TLS certs if needed")
-    if err := control.Generate(true, "/etc/docker/tls", []string{"127.0.0.1", "*", "*.*", "*.*.*", "*.*.*.*"}); err != nil {
-      return err
-    }
-  }
-
-  prog := findProgram("docker-init", "dockerlaunch", "docker")
-  if strings.Contains(prog, "dockerlaunch") {
-    args = append([]string{prog, "docker"}, args...)
-  } else {
-    args = append([]string{prog}, args...)
-  }
-
-  log.Infof("Running %v", args)
-  return syscall.Exec(args[0], args, dockerCfg.AppendEnv())
-}
-
-func switchCgroup(src, target int) error {
-  cgroupFile := fmt.Sprintf("/proc/%d/cgroup", target)
-  f, err := os.Open(cgroupFile)
-  if err != nil {
-    return err
-  }
-  defer f.Close()
-
-  targetCgroups := map[string]string{}
-
-  s := bufio.NewScanner(f)
-  for s.Scan() {
-    text := s.Text()
-    parts := strings.Split(text, ":")
-    subparts := strings.Split(parts[1], "=")
-    subsystem := subparts[0]
-    if len(subparts) > 1 {
-      subsystem = subparts[1]
-    }
-
-    targetPath := fmt.Sprintf("/host/sys/fs/cgroup/%s%s", subsystem, parts[2])
-    log.Infof("Moving Docker to cgroup %s", targetPath)
-    targetCgroups[subsystem] = targetPath
-  }
-
-  if err := s.Err(); err != nil {
-    return err
-  }
-
-  return cgroups.EnterPid(targetCgroups, src)
-}
diff --git a/cmd/waitfornetwork/waitfornetwork.go b/cmd/waitfornetwork/waitfornetwork.go
deleted file mode 100644
index 74a4326a..00000000
--- a/cmd/waitfornetwork/waitfornetwork.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package waitfornetwork
-
-import (
-  "github.com/rancher/os/cmd/network"
-  "os"
-  "os/signal"
-  "syscall"
-)
-
-func handleTerm() {
-  c := make(chan os.Signal, 1)
-  signal.Notify(c, syscall.SIGTERM)
-  <-c
-  os.Exit(0)
-}
-
-func Main() {
-  go handleTerm()
-  if _, err := os.Stat(network.NETWORK_DONE); err == nil {
-    os.Exit(0)
-  }
-  select {}
-}
diff --git a/compose/project.go b/compose/project.go
index d6f98dfc..37b4905e 100644
--- a/compose/project.go
+++ b/compose/project.go
@@ -3,27 +3,29 @@ package compose
 import (
   "fmt"
 
+  "golang.org/x/net/context"
+
   log "github.com/Sirupsen/logrus"
   yaml "github.com/cloudfoundry-incubator/candiedyaml"
   "github.com/docker/libcompose/cli/logger"
+  composeConfig "github.com/docker/libcompose/config"
   "github.com/docker/libcompose/docker"
+  composeClient "github.com/docker/libcompose/docker/client"
   "github.com/docker/libcompose/project"
+  "github.com/docker/libcompose/project/events"
+  "github.com/docker/libcompose/project/options"
   "github.com/rancher/os/config"
   rosDocker "github.com/rancher/os/docker"
   "github.com/rancher/os/util"
   "github.com/rancher/os/util/network"
 )
 
-func CreateService(cfg *config.CloudConfig, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
+func CreateService(cfg *config.CloudConfig, name string, serviceConfig *composeConfig.ServiceConfigV1) (project.Service, error) {
   if cfg == nil {
-    var err error
-    cfg, err = config.LoadConfig()
-    if err != nil {
-      return nil, err
-    }
+    cfg = config.LoadConfig()
   }
 
-  p, err := CreateServiceSet("once", cfg, map[string]*project.ServiceConfig{
+  p, err := CreateServiceSet("once", cfg, map[string]*composeConfig.ServiceConfigV1{
     name: serviceConfig,
   })
   if err != nil {
@@ -33,8 +35,8 @@ func CreateService(cfg *config.CloudConfig, name string, serviceConfig *project.
   return p.CreateService(name)
 }
 
-func CreateServiceSet(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) (*project.Project, error) {
-  p, err := newProject(name, cfg, nil)
+func CreateServiceSet(name string, cfg *config.CloudConfig, configs map[string]*composeConfig.ServiceConfigV1) (*project.Project, error) {
+  p, err := newProject(name, cfg, nil, nil)
   if err != nil {
     return nil, err
   }
@@ -44,21 +46,22 @@ func CreateServiceSet(name string, cfg *config.CloudConfig, configs map[string]*
   return p, nil
 }
 
-func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) (*project.Project, error) {
+func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*composeConfig.ServiceConfigV1) (*project.Project, error) {
   p, err := CreateServiceSet(name, cfg, configs)
   if err != nil {
     return nil, err
   }
-
-  return p, p.Up()
+  return p, p.Up(context.Background(), options.Up{
+    Log: cfg.Rancher.Log,
+  })
 }
 
-func GetProject(cfg *config.CloudConfig, networkingAvailable bool) (*project.Project, error) {
-  return newCoreServiceProject(cfg, networkingAvailable)
+func GetProject(cfg *config.CloudConfig, networkingAvailable, loadConsole bool) (*project.Project, error) {
+  return newCoreServiceProject(cfg, networkingAvailable, loadConsole)
 }
 
-func newProject(name string, cfg *config.CloudConfig, environmentLookup project.EnvironmentLookup) (*project.Project, error) {
-  clientFactory, err := rosDocker.NewClientFactory(docker.ClientOpts{})
+func newProject(name string, cfg *config.CloudConfig, environmentLookup composeConfig.EnvironmentLookup, authLookup *rosDocker.ConfigAuthLookup) (*project.Project, error) {
+  clientFactory, err := rosDocker.NewClientFactory(composeClient.Options{})
   if err != nil {
     return nil, err
   }
@@ -66,31 +69,89 @@ func newProject(name string, cfg *config.CloudConfig, environmentLookup project.
   if environmentLookup == nil {
     environmentLookup = rosDocker.NewConfigEnvironment(cfg)
   }
+  if authLookup == nil {
+    authLookup = rosDocker.NewConfigAuthLookup(cfg)
+  }
 
   serviceFactory := &rosDocker.ServiceFactory{
     Deps: map[string][]string{},
   }
   context := &docker.Context{
     ClientFactory: clientFactory,
+    AuthLookup: authLookup,
     Context: project.Context{
       ProjectName: name,
-      NoRecreate: true, // for libcompose to not recreate on project reload, looping up the boot :)
       EnvironmentLookup: environmentLookup,
       ServiceFactory: serviceFactory,
-      Log: cfg.Rancher.Log,
       LoggerFactory: logger.NewColorLoggerFactory(),
     },
   }
   serviceFactory.Context = context
 
-  return docker.NewProject(context)
+  authLookup.SetContext(context)
+
+  return docker.NewProject(context, &composeConfig.ParseOptions{
+    Interpolate: true,
+    Validate: false,
+    Preprocess: preprocessServiceMap,
+  })
 }
 
-func addServices(p *project.Project, enabled map[interface{}]interface{}, configs map[string]*project.ServiceConfig) map[interface{}]interface{} {
+func preprocessServiceMap(serviceMap composeConfig.RawServiceMap) (composeConfig.RawServiceMap, error) {
+  newServiceMap := make(composeConfig.RawServiceMap)
+
+  for k, v := range serviceMap {
+    newServiceMap[k] = make(composeConfig.RawService)
+
+    for k2, v2 := range v {
+      if k2 == "environment" || k2 == "labels" {
+        newServiceMap[k][k2] = preprocess(v2, true)
+      } else {
+        newServiceMap[k][k2] = preprocess(v2, false)
+      }
+
+    }
+  }
+
+  return newServiceMap, nil
+}
+
+func preprocess(item interface{}, replaceTypes bool) interface{} {
+  switch typedDatas := item.(type) {
+
+  case map[interface{}]interface{}:
+    newMap := make(map[interface{}]interface{})
+
+    for key, value := range typedDatas {
+      newMap[key] = preprocess(value, replaceTypes)
+    }
+    return newMap
+
+  case []interface{}:
+    // newArray := make([]interface{}, 0) will cause golint to complain
+    var newArray []interface{}
+    newArray = make([]interface{}, 0)
+
+    for _, value := range typedDatas {
+      newArray = append(newArray, preprocess(value, replaceTypes))
+    }
+    return newArray
+
+  default:
+    if replaceTypes {
+      return fmt.Sprint(item)
+    }
+    return item
+  }
+}
+
+func addServices(p *project.Project, enabled map[interface{}]interface{}, configs map[string]*composeConfig.ServiceConfigV1) map[interface{}]interface{} {
+  serviceConfigsV2, _ := composeConfig.ConvertServices(configs)
+  // Note: we ignore errors while loading services
   unchanged := true
-  for name, serviceConfig := range configs {
-    hash := project.GetServiceHash(name, serviceConfig)
+  for name, serviceConfig := range serviceConfigsV2 {
+    hash := composeConfig.GetServiceHash(name, serviceConfig)
 
     if enabled[name] == hash {
       continue
@@ -123,71 +184,24 @@ func adjustContainerNames(m map[interface{}]interface{}) map[interface{}]interfa
   return m
 }
 
-func newCoreServiceProject(cfg *config.CloudConfig, useNetwork bool) (*project.Project, error) {
-  projectEvents := make(chan project.Event)
-  enabled := map[interface{}]interface{}{}
-
+func newCoreServiceProject(cfg *config.CloudConfig, useNetwork, loadConsole bool) (*project.Project, error) {
   environmentLookup := rosDocker.NewConfigEnvironment(cfg)
+  authLookup := rosDocker.NewConfigAuthLookup(cfg)
 
-  p, err := newProject("os", cfg, environmentLookup)
+  p, err := newProject("os", cfg, environmentLookup, authLookup)
   if err != nil {
     return nil, err
   }
 
+  projectEvents := make(chan events.Event)
   p.AddListener(project.NewDefaultListener(p))
   p.AddListener(projectEvents)
 
-  p.ReloadCallback = func() error {
-    var err error
-    cfg, err = config.LoadConfig()
-    if err != nil {
-      return err
-    }
-
-    environmentLookup.SetConfig(cfg)
-
-    enabled = addServices(p, enabled, cfg.Rancher.Services)
-
-    for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
-      if _, ok := enabled[service]; ok || !serviceEnabled {
-        continue
-      }
-
-      bytes, err := LoadServiceResource(service, useNetwork, cfg)
-      if err != nil {
-        if err == network.ErrNoNetwork {
-          log.Debugf("Can not load %s, networking not enabled", service)
-        } else {
-          log.Errorf("Failed to load %s : %v", service, err)
-        }
-        continue
-      }
-
-      m := map[interface{}]interface{}{}
-      if err := yaml.Unmarshal(bytes, &m); err != nil {
-        log.Errorf("Failed to parse YAML configuration: %s : %v", service, err)
-        continue
-      }
-      bytes, err = yaml.Marshal(adjustContainerNames(config.StringifyValues(m)))
-      if err != nil {
-        log.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
-        continue
-      }
-      err = p.Load(bytes)
-      if err != nil {
-        log.Errorf("Failed to load %s : %v", service, err)
-        continue
-      }
-
-      enabled[service] = service
-    }
-
-    return nil
-  }
+  p.ReloadCallback = projectReload(p, &useNetwork, loadConsole, environmentLookup, authLookup)
 
   go func() {
     for event := range projectEvents {
-      if event.EventType == project.EventContainerStarted && event.ServiceName == "ntp" {
+      if event.EventType == events.ContainerStarted && event.ServiceName == "ntp" {
         useNetwork = true
       }
     }
@@ -203,13 +217,13 @@ func newCoreServiceProject(cfg *config.CloudConfig, useNetwork bool) (*project.P
 }
 
 func StageServices(cfg *config.CloudConfig, services ...string) error {
-  p, err := newProject("stage-services", cfg, nil)
+  p, err := newProject("stage-services", cfg, nil, nil)
   if err != nil {
     return err
   }
 
   for _, service := range services {
-    bytes, err := LoadServiceResource(service, true, cfg)
+    bytes, err := network.LoadServiceResource(service, true, cfg)
     if err != nil {
       return fmt.Errorf("Failed to load %s : %v", service, err)
     }
@@ -219,29 +233,25 @@ func StageServices(cfg *config.CloudConfig, services ...string) error {
       return fmt.Errorf("Failed to parse YAML configuration: %s : %v", service, err)
     }
 
-    bytes, err = yaml.Marshal(config.StringifyValues(m))
+    bytes, err = yaml.Marshal(m)
     if err != nil {
-      fmt.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
+      return fmt.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
     }
 
     err = p.Load(bytes)
     if err != nil {
-      fmt.Errorf("Failed to load %s : %v", service, err)
+      return fmt.Errorf("Failed to load %s : %v", service, err)
    }
   }
 
   // Reduce service configurations to just image and labels
-  for serviceName, serviceConfig := range p.Configs {
-    p.Configs[serviceName] = &project.ServiceConfig{
+  for _, serviceName := range p.ServiceConfigs.Keys() {
+    serviceConfig, _ := p.ServiceConfigs.Get(serviceName)
+    p.ServiceConfigs.Add(serviceName, &composeConfig.ServiceConfig{
      Image: serviceConfig.Image,
      Labels: serviceConfig.Labels,
-    }
-
+    })
   }
 
-  return p.Pull()
-}
-
-func LoadServiceResource(name string, useNetwork bool, cfg *config.CloudConfig) ([]byte, error) {
-  return network.LoadResource(name, useNetwork, cfg.Rancher.Repositories.ToArray())
+  return p.Pull(context.Background())
 }
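The new Preprocess hook exists because YAML happily yields ints and bools where libcompose v2 expects strings; preprocess(…, true) flattens those scalars for the environment and labels keys only. A self-contained illustration of the same coercion (hypothetical stringify helper, stdlib only):

package main

import "fmt"

// stringify mirrors what preprocess(item, true) does to a value tree:
// every leaf scalar is rendered with fmt.Sprint, so 42 becomes "42" and
// true becomes "true", while map and slice structure is preserved.
func stringify(item interface{}) interface{} {
  switch v := item.(type) {
  case map[interface{}]interface{}:
    out := make(map[interface{}]interface{}, len(v))
    for k, val := range v {
      out[k] = stringify(val)
    }
    return out
  case []interface{}:
    out := make([]interface{}, 0, len(v))
    for _, val := range v {
      out = append(out, stringify(val))
    }
    return out
  default:
    return fmt.Sprint(v)
  }
}

func main() {
  labels := map[interface{}]interface{}{"some-num": 42, "some-bool": true}
  fmt.Println(stringify(labels)) // same shape, but "42" and "true" as strings
}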
"github.com/rancher/os/config" + "github.com/rancher/os/docker" + "github.com/rancher/os/util/network" +) + +func LoadService(p *project.Project, cfg *config.CloudConfig, useNetwork bool, service string) error { + bytes, err := network.LoadServiceResource(service, useNetwork, cfg) + if err != nil { + return err + } + + m := map[interface{}]interface{}{} + if err = yaml.Unmarshal(bytes, &m); err != nil { + return fmt.Errorf("Failed to parse YAML configuration for %s: %v", service, err) + } + + m = adjustContainerNames(m) + + bytes, err = yaml.Marshal(m) + if err != nil { + return fmt.Errorf("Failed to marshal YAML configuration for %s: %v", service, err) + } + + if err = p.Load(bytes); err != nil { + return fmt.Errorf("Failed to load %s: %v", service, err) + } + + return nil +} + +func projectReload(p *project.Project, useNetwork *bool, loadConsole bool, environmentLookup *docker.ConfigEnvironment, authLookup *docker.ConfigAuthLookup) func() error { + enabled := map[interface{}]interface{}{} + return func() error { + cfg := config.LoadConfig() + + environmentLookup.SetConfig(cfg) + authLookup.SetConfig(cfg) + + enabled = addServices(p, enabled, cfg.Rancher.Services) + + for service, serviceEnabled := range cfg.Rancher.ServicesInclude { + if _, ok := enabled[service]; ok || !serviceEnabled { + continue + } + + if err := LoadService(p, cfg, *useNetwork, service); err != nil { + if err != network.ErrNoNetwork { + log.Error(err) + } + continue + } + + enabled[service] = service + } + + if !loadConsole || cfg.Rancher.Console == "" || cfg.Rancher.Console == "default" { + return nil + } + + if err := LoadService(p, cfg, *useNetwork, cfg.Rancher.Console); err != nil && err != network.ErrNoNetwork { + log.Error(err) + } + + return nil + } +} diff --git a/config/config.go b/config/config.go index 959a124d..2760cfdf 100644 --- a/config/config.go +++ b/config/config.go @@ -1,135 +1,37 @@ package config import ( - "fmt" - - log "github.com/Sirupsen/logrus" yaml "github.com/cloudfoundry-incubator/candiedyaml" "github.com/rancher/os/util" ) -func (c *CloudConfig) Import(bytes []byte) (*CloudConfig, error) { - data, err := readConfig(bytes, false, CloudConfigPrivateFile) +func Merge(bytes []byte) error { + data, err := readConfigs(bytes, false, true) if err != nil { - return c, err + return err } - - return NewConfig().Merge(data) -} - -func (c *CloudConfig) MergeBytes(bytes []byte) (*CloudConfig, error) { - data, err := readConfig(bytes, false) + existing, err := readConfigs(nil, false, true, CloudConfigFile) if err != nil { - return c, err + return err } - return c.Merge(data) + return WriteToFile(util.Merge(existing, data), CloudConfigFile) } -var keysToStringify = []string{ - "command", - "dns", - "dns_search", - "entrypoint", - "env_file", - "environment", - "labels", - "links", -} - -func isPathToStringify(path []interface{}) bool { - l := len(path) - if l == 0 { - return false - } - if sk, ok := path[l-1].(string); ok { - return util.Contains(keysToStringify, sk) - } - return false -} - -func stringifyValue(data interface{}, path []interface{}) interface{} { - switch data := data.(type) { - case map[interface{}]interface{}: - result := make(map[interface{}]interface{}, len(data)) - if isPathToStringify(path) { - for k, v := range data { - switch v := v.(type) { - case []interface{}: - result[k] = stringifyValue(v, append(path, k)) - case map[interface{}]interface{}: - result[k] = stringifyValue(v, append(path, k)) - default: - result[k] = fmt.Sprint(v) - } - } - } else { - for k, v := range data { - 
diff --git a/config/config.go b/config/config.go
index 959a124d..2760cfdf 100644
--- a/config/config.go
+++ b/config/config.go
@@ -1,135 +1,37 @@
 package config
 
 import (
-  "fmt"
-
-  log "github.com/Sirupsen/logrus"
   yaml "github.com/cloudfoundry-incubator/candiedyaml"
   "github.com/rancher/os/util"
 )
 
-func (c *CloudConfig) Import(bytes []byte) (*CloudConfig, error) {
-  data, err := readConfig(bytes, false, CloudConfigPrivateFile)
+func Merge(bytes []byte) error {
+  data, err := readConfigs(bytes, false, true)
   if err != nil {
-    return c, err
+    return err
   }
-
-  return NewConfig().Merge(data)
-}
-
-func (c *CloudConfig) MergeBytes(bytes []byte) (*CloudConfig, error) {
-  data, err := readConfig(bytes, false)
+  existing, err := readConfigs(nil, false, true, CloudConfigFile)
   if err != nil {
-    return c, err
+    return err
   }
-  return c.Merge(data)
+  return WriteToFile(util.Merge(existing, data), CloudConfigFile)
 }
 
-var keysToStringify = []string{
-  "command",
-  "dns",
-  "dns_search",
-  "entrypoint",
-  "env_file",
-  "environment",
-  "labels",
-  "links",
-}
-
-func isPathToStringify(path []interface{}) bool {
-  l := len(path)
-  if l == 0 {
-    return false
-  }
-  if sk, ok := path[l-1].(string); ok {
-    return util.Contains(keysToStringify, sk)
-  }
-  return false
-}
-
-func stringifyValue(data interface{}, path []interface{}) interface{} {
-  switch data := data.(type) {
-  case map[interface{}]interface{}:
-    result := make(map[interface{}]interface{}, len(data))
-    if isPathToStringify(path) {
-      for k, v := range data {
-        switch v := v.(type) {
-        case []interface{}:
-          result[k] = stringifyValue(v, append(path, k))
-        case map[interface{}]interface{}:
-          result[k] = stringifyValue(v, append(path, k))
-        default:
-          result[k] = fmt.Sprint(v)
-        }
-      }
-    } else {
-      for k, v := range data {
-        result[k] = stringifyValue(v, append(path, k))
-      }
-    }
-    return result
-  case []interface{}:
-    result := make([]interface{}, len(data))
-    if isPathToStringify(path) {
-      for k, v := range data {
-        result[k] = fmt.Sprint(v)
-      }
-    } else {
-      for k, v := range data {
-        result[k] = stringifyValue(v, append(path, k))
-      }
-    }
-    return result
-  default:
-    return data
-  }
-}
-
-func StringifyValues(data map[interface{}]interface{}) map[interface{}]interface{} {
-  return stringifyValue(data, nil).(map[interface{}]interface{})
-}
-
-func (c *CloudConfig) Merge(values map[interface{}]interface{}) (*CloudConfig, error) {
-  d := map[interface{}]interface{}{}
-  if err := util.Convert(c, &d); err != nil {
-    return c, err
-  }
-  r := util.MapsUnion(d, StringifyValues(values))
-  t := &CloudConfig{}
-  if err := util.Convert(r, t); err != nil {
-    return c, err
-  }
-  return t, nil
-}
-
-func Dump(private, full bool) (string, error) {
-  var cfg *CloudConfig
-  var err error
-
-  if full {
-    cfg, err = LoadConfig()
-  } else {
-    files := []string{CloudConfigBootFile, CloudConfigPrivateFile, CloudConfigFile}
-    if !private {
-      files = util.FilterStrings(files, func(x string) bool { return x != CloudConfigPrivateFile })
-    }
-    cfg, err = ChainCfgFuncs(nil,
-      func(_ *CloudConfig) (*CloudConfig, error) { return ReadConfig(nil, true, files...) },
-      amendNils,
-    )
+func Export(private, full bool) (string, error) {
+  rawCfg := loadRawDiskConfig(full)
+  if !private {
+    rawCfg = filterPrivateKeys(rawCfg)
   }
-  if err != nil {
-    return "", err
-  }
-
-  bytes, err := yaml.Marshal(*cfg)
+  bytes, err := yaml.Marshal(rawCfg)
   return string(bytes), err
 }
 
-func (c *CloudConfig) Get(key string) (interface{}, error) {
+func Get(key string) (interface{}, error) {
+  cfg := LoadConfig()
+
   data := map[interface{}]interface{}{}
-  if err := util.Convert(c, &data); err != nil {
+  if err := util.ConvertIgnoreOmitEmpty(cfg, &data); err != nil {
     return nil, err
   }
@@ -137,58 +39,12 @@ func (c *CloudConfig) Get(key string) (interface{}, error) {
   return v, nil
 }
 
-func (c *CloudConfig) GetIgnoreOmitEmpty(key string) (interface{}, error) {
-  data := map[interface{}]interface{}{}
-  if err := util.ConvertIgnoreOmitEmpty(c, &data); err != nil {
-    return nil, err
-  }
-
-  v, _ := getOrSetVal(key, data, nil)
-  return v, nil
-}
-
-func (c *CloudConfig) Set(key string, value interface{}) (map[interface{}]interface{}, error) {
-  data := map[interface{}]interface{}{}
-  _, data = getOrSetVal(key, data, value)
-
-  return data, nil
-}
-
-func (c *CloudConfig) Save(cfgDiffs ...map[interface{}]interface{}) error {
-  files := append([]string{OsConfigFile, OemConfigFile}, CloudConfigDirFiles()...)
-  files = util.FilterStrings(files, func(x string) bool { return x != CloudConfigPrivateFile })
-  exCfg, err := ChainCfgFuncs(nil,
-    func(_ *CloudConfig) (*CloudConfig, error) {
-      return ReadConfig(nil, true, files...)
-    },
-    readCmdline,
-    amendNils)
+func Set(key string, value interface{}) error {
+  existing, err := readConfigs(nil, false, true, CloudConfigFile)
   if err != nil {
     return err
   }
 
-  exCfg = mergeMetadata(exCfg, readMetadata())
-
-  exData := map[interface{}]interface{}{}
-  if err := util.Convert(exCfg, &exData); err != nil {
-    return err
-  }
-
-  data := map[interface{}]interface{}{}
-  if err := util.Convert(c, &data); err != nil {
-    return err
-  }
-
-  data = util.MapsDifference(data, exData)
-
-  // Apply any additional config diffs
-  for _, diff := range cfgDiffs {
-    data = util.MapsUnion(data, diff)
-  }
-
-  log.WithFields(log.Fields{"diff": data}).Debug("The diff we're about to save")
-  if err := saveToDisk(data); err != nil {
-    return err
-  }
-  return nil
+  _, modified := getOrSetVal(key, existing, value)
+  return WriteToFile(modified, CloudConfigFile)
 }
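Get and Set are now stateless package-level functions: Set rewrites only the user cloud-config file, while Get reads through the fully merged view (defaults, disk, cmdline, metadata). A usage sketch (assuming the config package from this diff):

package main

import (
  "fmt"

  "github.com/rancher/os/config"
)

func main() {
  // Persists {rancher: {log: true}} into the user cloud-config file; private
  // keys now live in the same file and are only filtered out on Export.
  if err := config.Set("rancher.log", true); err != nil {
    fmt.Println(err)
    return
  }
  v, err := config.Get("rancher.log")
  fmt.Println(v, err) // true <nil>
}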
diff --git a/config/config_test.go b/config/config_test.go
index cb7bbbf0..d258d108 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -1,13 +1,11 @@
 package config
 
 import (
-  "fmt"
   yaml "github.com/cloudfoundry-incubator/candiedyaml"
   "testing"
 
   "github.com/rancher/os/util"
   "github.com/stretchr/testify/require"
-  "strings"
 )
 
 func TestFilterKey(t *testing.T) {
@@ -55,104 +53,6 @@ func TestFilterKey(t *testing.T) {
   assert.Equal(expectedRest, rest)
 }
 
-func TestStringifyValues(t *testing.T) {
-  assert := require.New(t)
-  data := map[interface{}]interface{}{
-    "ssh_authorized_keys": []string{"pubk1", "pubk2"},
-    "hostname": "ros-test",
-    "rancher": map[interface{}]interface{}{
-      "services": map[interface{}]interface{}{
-        "my-service": map[interface{}]interface{}{
-          "command": []interface{}{"echo", 1, false, "nothing"},
-          "labels": map[interface{}]interface{}{
-            "some-bool": true,
-            "some-num": 42,
-          },
-          "dsa-pub": "dsa-test2",
-        },
-      },
-      "docker": map[interface{}]interface{}{
-        "ca_key": "ca_key-test3",
-        "ca_cert": "ca_cert-test4",
-        "args": []string{"args_test5"},
-      },
-    },
-  }
-  expected := map[interface{}]interface{}{
-    "ssh_authorized_keys": []string{"pubk1", "pubk2"},
-    "hostname": "ros-test",
-    "rancher": map[interface{}]interface{}{
-      "services": map[interface{}]interface{}{
-        "my-service": map[interface{}]interface{}{
-          "command": []interface{}{"echo", "1", "false", "nothing"},
-          "labels": map[interface{}]interface{}{
-            "some-bool": "true",
-            "some-num": "42",
-          },
-          "dsa-pub": "dsa-test2",
-        },
-      },
-      "docker": map[interface{}]interface{}{
-        "ca_key": "ca_key-test3",
-        "ca_cert": "ca_cert-test4",
-        "args": []string{"args_test5"},
-      },
-    },
-  }
-  assert.Equal(expected, StringifyValues(data))
-}
-
-func TestFilterDottedKeys(t *testing.T) {
-  assert := require.New(t)
-
-  data := map[interface{}]interface{}{
-    "ssh_authorized_keys": []string{"pubk1", "pubk2"},
-    "hostname": "ros-test",
-    "rancher": map[interface{}]interface{}{
-      "ssh": map[interface{}]interface{}{
-        "keys": map[interface{}]interface{}{
-          "dsa": "dsa-test1",
-          "dsa-pub": "dsa-test2",
-        },
-      },
-      "docker": map[interface{}]interface{}{
-        "ca_key": "ca_key-test3",
-        "ca_cert": "ca_cert-test4",
-        "args": []string{"args_test5"},
-      },
-    },
-  }
-  expectedFiltered := map[interface{}]interface{}{
-    "ssh_authorized_keys": []string{"pubk1", "pubk2"},
-    "rancher": map[interface{}]interface{}{
-      "ssh": map[interface{}]interface{}{
-        "keys": map[interface{}]interface{}{
-          "dsa": "dsa-test1",
-          "dsa-pub": "dsa-test2",
-        },
-      },
-    },
-  }
-  expectedRest := map[interface{}]interface{}{
-    "hostname": "ros-test",
-    "rancher": map[interface{}]interface{}{
-      "docker": map[interface{}]interface{}{
-        "ca_key": "ca_key-test3",
-        "ca_cert": "ca_cert-test4",
-        "args": []string{"args_test5"},
-      },
-    },
-  }
-
-  assert.Equal([]string{"rancher", "ssh"}, strings.Split("rancher.ssh", "."))
-  assert.Equal([]string{"ssh_authorized_keys"}, strings.Split("ssh_authorized_keys", "."))
-
-  filtered, rest := filterDottedKeys(data, []string{"ssh_authorized_keys", "rancher.ssh"})
-
-  assert.Equal(expectedFiltered, filtered)
-  assert.Equal(expectedRest, rest)
-}
-
 func TestUnmarshalOrReturnString(t *testing.T) {
   assert := require.New(t)
@@ -358,8 +258,6 @@ func TestUserDocker(t *testing.T) {
   err = util.Convert(config, &data)
   assert.Nil(err)
 
-  fmt.Println(data)
-
   val, ok := data["rancher"].(map[interface{}]interface{})["docker"]
   assert.True(ok)
diff --git a/config/data_funcs.go b/config/data_funcs.go
index 2ab8bee1..5156d8a8 100644
--- a/config/data_funcs.go
+++ b/config/data_funcs.go
@@ -4,8 +4,9 @@ import (
   log "github.com/Sirupsen/logrus"
   yaml "github.com/cloudfoundry-incubator/candiedyaml"
 
-  "github.com/rancher/os/util"
   "strings"
+
+  "github.com/rancher/os/util"
 )
 
 type CfgFunc func(*CloudConfig) (*CloudConfig, error)
@@ -58,17 +59,12 @@ func filterKey(data map[interface{}]interface{}, key []string) (filtered, rest m
   return
 }
 
-func filterDottedKeys(data map[interface{}]interface{}, keys []string) (filtered, rest map[interface{}]interface{}) {
-  filtered = map[interface{}]interface{}{}
-  rest = util.MapCopy(data)
-
-  for _, key := range keys {
-    f, r := filterKey(data, strings.Split(key, "."))
-    filtered = util.MapsUnion(filtered, f)
-    rest = util.MapsIntersection(rest, r)
+func filterPrivateKeys(data map[interface{}]interface{}) map[interface{}]interface{} {
+  for _, privateKey := range PrivateKeys {
+    _, data = filterKey(data, strings.Split(privateKey, "."))
   }
-  return
+  return data
 }
 
 func getOrSetVal(args string, data map[interface{}]interface{}, value interface{}) (interface{}, map[interface{}]interface{}) {
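filterPrivateKeys now drives redaction from the single PrivateKeys list in config/types.go rather than call-site literals. A stdlib-only stand-in for what filterKey does with one dotted path (simplified illustration; the real helper also returns the filtered-out subtree):

package main

import (
  "fmt"
  "strings"
)

// strip removes the value at a dotted path like "rancher.docker.ca_key"
// from a nested map, descending one map level per path segment.
func strip(data map[interface{}]interface{}, path []string) {
  if len(path) == 1 {
    delete(data, path[0])
    return
  }
  if child, ok := data[path[0]].(map[interface{}]interface{}); ok {
    strip(child, path[1:])
  }
}

func main() {
  data := map[interface{}]interface{}{
    "rancher": map[interface{}]interface{}{
      "docker": map[interface{}]interface{}{"ca_key": "secret", "args": "ok"},
    },
  }
  strip(data, strings.Split("rancher.docker.ca_key", "."))
  fmt.Println(data) // map[rancher:map[docker:map[args:ok]]]
}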
diff --git a/config/disk.go b/config/disk.go
index a9015ecd..0d990fa5 100644
--- a/config/disk.go
+++ b/config/disk.go
@@ -11,70 +11,60 @@ import (
   yaml "github.com/cloudfoundry-incubator/candiedyaml"
   "github.com/coreos/coreos-cloudinit/datasource"
   "github.com/coreos/coreos-cloudinit/initialize"
-  "github.com/docker/libcompose/project"
+  "github.com/docker/engine-api/types"
+  composeConfig "github.com/docker/libcompose/config"
   "github.com/rancher/os/util"
 )
 
-var osConfig *CloudConfig
-
-func NewConfig() *CloudConfig {
-  if osConfig == nil {
-    osConfig, _ = ReadConfig(nil, true, OsConfigFile, OemConfigFile)
-  }
-  newCfg := *osConfig
-  return &newCfg
-}
-
 func ReadConfig(bytes []byte, substituteMetadataVars bool, files ...string) (*CloudConfig, error) {
-  if data, err := readConfig(bytes, substituteMetadataVars, files...); err == nil {
-    c := &CloudConfig{}
-    if err := util.Convert(data, c); err != nil {
-      return nil, err
-    }
-    c, _ = amendNils(c)
-    c, _ = amendContainerNames(c)
-    return c, nil
-  } else {
+  data, err := readConfigs(bytes, substituteMetadataVars, true, files...)
+  if err != nil {
     return nil, err
   }
+
+  c := &CloudConfig{}
+  if err := util.Convert(data, c); err != nil {
+    return nil, err
+  }
+  c = amendNils(c)
+  c = amendContainerNames(c)
+  return c, nil
 }
 
-func LoadConfig() (*CloudConfig, error) {
-  cfg, err := ChainCfgFuncs(NewConfig(),
-    readFilesAndMetadata,
-    readCmdline,
-    amendNils,
-    amendContainerNames)
-  if err != nil {
-    log.WithFields(log.Fields{"cfg": cfg, "err": err}).Error("Failed to load config")
-    return nil, err
+func loadRawDiskConfig(full bool) map[interface{}]interface{} {
+  var rawCfg map[interface{}]interface{}
+  if full {
+    rawCfg, _ = readConfigs(nil, true, false, OsConfigFile, OemConfigFile)
   }
 
-  log.Debug("Merging cloud-config from meta-data and user-data")
-  cfg = mergeMetadata(cfg, readMetadata())
+  files := append(CloudConfigDirFiles(), CloudConfigFile)
+  additionalCfgs, _ := readConfigs(nil, true, false, files...)
 
-  if cfg.Rancher.Debug {
-    log.SetLevel(log.DebugLevel)
-    if !util.Contains(cfg.Rancher.Docker.Args, "-D") {
-      cfg.Rancher.Docker.Args = append(cfg.Rancher.Docker.Args, "-D")
-    }
-    if !util.Contains(cfg.Rancher.SystemDocker.Args, "-D") {
-      cfg.Rancher.SystemDocker.Args = append(cfg.Rancher.SystemDocker.Args, "-D")
-    }
-  } else {
-    if util.Contains(cfg.Rancher.Docker.Args, "-D") {
-      cfg.Rancher.Docker.Args = util.FilterStrings(cfg.Rancher.Docker.Args, func(x string) bool { return x != "-D" })
-    }
-    if util.Contains(cfg.Rancher.SystemDocker.Args, "-D") {
-      cfg.Rancher.SystemDocker.Args = util.FilterStrings(cfg.Rancher.SystemDocker.Args, func(x string) bool { return x != "-D" })
-    }
+  return util.Merge(rawCfg, additionalCfgs)
+}
+
+func loadRawConfig() map[interface{}]interface{} {
+  rawCfg := loadRawDiskConfig(true)
+  rawCfg = util.Merge(rawCfg, readCmdline())
+  rawCfg = applyDebugFlags(rawCfg)
+  return mergeMetadata(rawCfg, readMetadata())
+}
+
+func LoadConfig() *CloudConfig {
+  rawCfg := loadRawConfig()
+
+  cfg := &CloudConfig{}
+  if err := util.Convert(rawCfg, cfg); err != nil {
+    log.Errorf("Failed to parse configuration: %s", err)
+    return &CloudConfig{}
   }
-
-  return cfg, nil
+  cfg = amendNils(cfg)
+  cfg = amendContainerNames(cfg)
+  return cfg
 }
 
 func CloudConfigDirFiles() []string {
-  files, err := util.DirLs(CloudConfigDir)
+  files, err := ioutil.ReadDir(CloudConfigDir)
   if err != nil {
     if os.IsNotExist(err) {
       // do nothing
@@ -85,36 +75,56 @@ func CloudConfigDirFiles() []string {
     return []string{}
   }
 
-  files = util.Filter(files, func(x interface{}) bool {
-    f := x.(os.FileInfo)
-    if f.IsDir() || strings.HasPrefix(f.Name(), ".") {
-      return false
+  var finalFiles []string
+  for _, file := range files {
+    if !file.IsDir() && !strings.HasPrefix(file.Name(), ".") {
+      finalFiles = append(finalFiles, path.Join(CloudConfigDir, file.Name()))
     }
-    return true
-  })
+  }
 
-  return util.ToStrings(util.Map(files, func(x interface{}) interface{} {
-    return path.Join(CloudConfigDir, x.(os.FileInfo).Name())
-  }))
+  return finalFiles
+}
+
+func applyDebugFlags(rawCfg map[interface{}]interface{}) map[interface{}]interface{} {
+  cfg := &CloudConfig{}
+  if err := util.Convert(rawCfg, cfg); err != nil {
+    return rawCfg
+  }
+
+  if cfg.Rancher.Debug {
+    log.SetLevel(log.DebugLevel)
+    if !util.Contains(cfg.Rancher.Docker.Args, "-D") {
+      cfg.Rancher.Docker.Args = append(cfg.Rancher.Docker.Args, "-D")
+    }
+    if !util.Contains(cfg.Rancher.SystemDocker.Args, "-D") {
+      cfg.Rancher.SystemDocker.Args = append(cfg.Rancher.SystemDocker.Args, "-D")
+    }
+  }
+
+  _, rawCfg = getOrSetVal("rancher.docker.args", rawCfg, cfg.Rancher.Docker.Args)
+  _, rawCfg = getOrSetVal("rancher.system_docker.args", rawCfg, cfg.Rancher.SystemDocker.Args)
+  return rawCfg
 }
 
 // mergeMetadata merges certain options from md (meta-data from the datasource)
 // onto cc (a CloudConfig derived from user-data), if they are not already set
 // on cc (i.e. user-data always takes precedence)
-func mergeMetadata(cc *CloudConfig, md datasource.Metadata) *CloudConfig {
-  if cc == nil {
-    return cc
+func mergeMetadata(rawCfg map[interface{}]interface{}, md datasource.Metadata) map[interface{}]interface{} {
+  if rawCfg == nil {
+    return nil
+  }
+  out := util.MapCopy(rawCfg)
+
+  outHostname, ok := out["hostname"]
+  if !ok {
+    outHostname = ""
   }
-  out := cc
-  dirty := false
 
   if md.Hostname != "" {
-    if out.Hostname != "" {
-      log.Debugf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", out.Hostname, md.Hostname)
+    if outHostname != "" {
+      log.Debugf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", outHostname, md.Hostname)
     } else {
-      out = &(*cc)
-      dirty = true
-      out.Hostname = md.Hostname
+      out["hostname"] = md.Hostname
     }
   }
@@ -126,14 +136,18 @@ func mergeMetadata(cc *CloudConfig, md datasource.Metadata) *CloudConfig {
 
   sort.Sort(sort.StringSlice(keys))
 
-  for _, k := range keys {
-    if !dirty {
-      out = &(*cc)
-      dirty = true
-    }
-    out.SSHAuthorizedKeys = append(out.SSHAuthorizedKeys, md.SSHPublicKeys[k])
+  currentKeys, ok := out["ssh_authorized_keys"]
+  if !ok {
+    return out
   }
 
+  finalKeys := currentKeys.([]interface{})
+  for _, k := range keys {
+    finalKeys = append(finalKeys, md.SSHPublicKeys[k])
+  }
+
+  out["ssh_authorized_keys"] = finalKeys
+
   return out
 }
@@ -145,68 +159,50 @@ func readMetadata() datasource.Metadata {
   return metadata
 }
 
-func readFilesAndMetadata(c *CloudConfig) (*CloudConfig, error) {
-  files := append(CloudConfigDirFiles(), CloudConfigFile)
-  data, err := readConfig(nil, true, files...)
-  if err != nil {
-    log.WithFields(log.Fields{"err": err, "files": files}).Error("Error reading config files")
-    return c, err
-  }
-
-  t, err := c.Merge(data)
-  if err != nil {
-    log.WithFields(log.Fields{"cfg": c, "data": data, "err": err}).Error("Error merging config data")
-    return c, err
-  }
-
-  return t, nil
-}
-
-func readCmdline(c *CloudConfig) (*CloudConfig, error) {
+func readCmdline() map[interface{}]interface{} {
   log.Debug("Reading config cmdline")
   cmdLine, err := ioutil.ReadFile("/proc/cmdline")
   if err != nil {
     log.WithFields(log.Fields{"err": err}).Error("Failed to read kernel params")
-    return c, err
+    return nil
   }
 
   if len(cmdLine) == 0 {
-    return c, nil
+    return nil
   }
 
   log.Debugf("Config cmdline %s", cmdLine)
 
   cmdLineObj := parseCmdline(strings.TrimSpace(string(cmdLine)))
 
-  t, err := c.Merge(cmdLineObj)
-  if err != nil {
-    log.WithFields(log.Fields{"cfg": c, "cmdLine": cmdLine, "data": cmdLineObj, "err": err}).Warn("Error adding kernel params to config")
-  }
-  return t, nil
+  return cmdLineObj
 }
 
-func amendNils(c *CloudConfig) (*CloudConfig, error) {
+func amendNils(c *CloudConfig) *CloudConfig {
   t := *c
   if t.Rancher.Environment == nil {
     t.Rancher.Environment = map[string]string{}
   }
   if t.Rancher.Autoformat == nil {
-    t.Rancher.Autoformat = map[string]*project.ServiceConfig{}
+    t.Rancher.Autoformat = map[string]*composeConfig.ServiceConfigV1{}
   }
   if t.Rancher.BootstrapContainers == nil {
-    t.Rancher.BootstrapContainers = map[string]*project.ServiceConfig{}
+    t.Rancher.BootstrapContainers = map[string]*composeConfig.ServiceConfigV1{}
   }
   if t.Rancher.Services == nil {
-    t.Rancher.Services = map[string]*project.ServiceConfig{}
+    t.Rancher.Services = map[string]*composeConfig.ServiceConfigV1{}
   }
   if t.Rancher.ServicesInclude == nil {
     t.Rancher.ServicesInclude = map[string]bool{}
   }
-  return &t, nil
+  if t.Rancher.RegistryAuths == nil {
+    t.Rancher.RegistryAuths = map[string]types.AuthConfig{}
+  }
+  return &t
 }
 
-func amendContainerNames(c *CloudConfig) (*CloudConfig, error) {
-  for _, scm := range []map[string]*project.ServiceConfig{
+func amendContainerNames(c *CloudConfig) *CloudConfig {
+  for _, scm := range []map[string]*composeConfig.ServiceConfigV1{
     c.Rancher.Autoformat,
     c.Rancher.BootstrapContainers,
     c.Rancher.Services,
@@ -215,7 +211,7 @@ func amendContainerNames(c *CloudConfig) (*CloudConfig, error) {
       v.ContainerName = k
     }
   }
-  return c, nil
+  return c
 }
 
 func WriteToFile(data interface{}, filename string) error {
@@ -224,27 +220,10 @@ func WriteToFile(data interface{}, filename string) error {
     return err
   }
 
-  return ioutil.WriteFile(filename, content, 400)
+  return util.WriteFileAtomic(filename, content, 400)
 }
 
-func saveToDisk(data map[interface{}]interface{}) error {
-  private, config := filterDottedKeys(data, []string{
-    "rancher.ssh",
-    "rancher.docker.ca_key",
-    "rancher.docker.ca_cert",
-    "rancher.docker.server_key",
-    "rancher.docker.server_cert",
-  })
-
-  err := WriteToFile(config, CloudConfigFile)
-  if err != nil {
-    return err
-  }
-
-  return WriteToFile(private, CloudConfigPrivateFile)
-}
-
-func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map[interface{}]interface{}, error) {
+func readConfigs(bytes []byte, substituteMetadataVars, returnErr bool, files ...string) (map[interface{}]interface{}, error) {
   // You can't just overlay yaml bytes on to maps, it won't merge, but instead
   // just override the keys and not merge the map values.
   left := make(map[interface{}]interface{})
@@ -252,7 +231,11 @@ func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map
   for _, file := range files {
     content, err := readConfigFile(file)
     if err != nil {
-      return nil, err
+      if returnErr {
+        return nil, err
+      }
+      log.Errorf("Failed to read config file %s: %s", file, err)
+      continue
     }
     if len(content) == 0 {
       continue
@@ -264,24 +247,53 @@ func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map
     right := make(map[interface{}]interface{})
     err = yaml.Unmarshal(content, &right)
     if err != nil {
-      return nil, err
+      if returnErr {
+        return nil, err
+      }
+      log.Errorf("Failed to parse config file %s: %s", file, err)
+      continue
     }
 
-    left = util.MapsUnion(left, right)
+    // Verify there are no issues converting to CloudConfig
+    c := &CloudConfig{}
+    if err := util.Convert(right, c); err != nil {
+      if returnErr {
+        return nil, err
+      }
+      log.Errorf("Failed to parse config file %s: %s", file, err)
+      continue
+    }
+
+    left = util.Merge(left, right)
   }
 
-  if bytes != nil && len(bytes) > 0 {
-    right := make(map[interface{}]interface{})
-    if substituteMetadataVars {
-      bytes = substituteVars(bytes, metadata)
-    }
-    if err := yaml.Unmarshal(bytes, &right); err != nil {
-      return nil, err
-    }
-
-    left = util.MapsUnion(left, right)
+  if bytes == nil || len(bytes) == 0 {
+    return left, nil
   }
 
+  right := make(map[interface{}]interface{})
+  if substituteMetadataVars {
+    bytes = substituteVars(bytes, metadata)
+  }
+
+  if err := yaml.Unmarshal(bytes, &right); err != nil {
+    if returnErr {
+      return nil, err
+    }
+    log.Errorf("Failed to parse bytes: %s", err)
+    return left, nil
+  }
+
+  c := &CloudConfig{}
+  if err := util.Convert(right, c); err != nil {
+    if returnErr {
+      return nil, err
+    }
+    log.Errorf("Failed to parse bytes: %s", err)
+    return left, nil
+  }
+
+  left = util.Merge(left, right)
 
   return left, nil
 }
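loadRawConfig fixes the precedence order in one place: built-in os-config and OEM config first, then cloud-config.d plus the user cloud-config, then kernel cmdline, debug flags, and finally datasource metadata. (Note in passing that WriteToFile still passes the literal 400, not octal 0400, as the file mode; the patch leaves that value unchanged.) A stdlib-only illustration of this kind of last-writer-wins layering (the merge helper here is a hypothetical stand-in for rancher/os's util.Merge):

package main

import "fmt"

// merge overlays right onto left, recursing into nested maps so that later
// layers override individual keys instead of replacing whole subtrees.
func merge(left, right map[interface{}]interface{}) map[interface{}]interface{} {
  out := make(map[interface{}]interface{}, len(left))
  for k, v := range left {
    out[k] = v
  }
  for k, v := range right {
    if lm, ok := out[k].(map[interface{}]interface{}); ok {
      if rm, ok := v.(map[interface{}]interface{}); ok {
        out[k] = merge(lm, rm)
        continue
      }
    }
    out[k] = v
  }
  return out
}

func main() {
  disk := map[interface{}]interface{}{"hostname": "rancher", "rancher": map[interface{}]interface{}{"debug": false}}
  cmdline := map[interface{}]interface{}{"rancher": map[interface{}]interface{}{"debug": true}}
  // hostname survives from disk; rancher.debug is overridden by the cmdline layer
  fmt.Println(merge(disk, cmdline))
}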
 func init() {
@@ -71,33 +81,36 @@ type CloudConfig struct {
   SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
   WriteFiles []config.File `yaml:"write_files"`
   Hostname string `yaml:"hostname"`
-  DefaultHostname string `yaml:"default_hostname"`
   Rancher RancherConfig `yaml:"rancher,omitempty"`
 }
 
 type RancherConfig struct {
-  Environment map[string]string `yaml:"environment,omitempty"`
-  Services map[string]*project.ServiceConfig `yaml:"services,omitempty"`
-  BootstrapContainers map[string]*project.ServiceConfig `yaml:"bootstrap,omitempty"`
-  Autoformat map[string]*project.ServiceConfig `yaml:"autoformat,omitempty"`
-  BootstrapDocker DockerConfig `yaml:"bootstrap_docker,omitempty"`
-  CloudInit CloudInit `yaml:"cloud_init,omitempty"`
-  Debug bool `yaml:"debug,omitempty"`
-  RmUsr bool `yaml:"rm_usr,omitempty"`
-  Log bool `yaml:"log,omitempty"`
-  ForceConsoleRebuild bool `yaml:"force_console_rebuild,omitempty"`
-  Disable []string `yaml:"disable,omitempty"`
-  ServicesInclude map[string]bool `yaml:"services_include,omitempty"`
-  Modules []string `yaml:"modules,omitempty"`
-  Network netconf.NetworkConfig `yaml:"network,omitempty"`
-  DefaultNetwork netconf.NetworkConfig `yaml:"default_network,omitempty"`
-  Repositories Repositories `yaml:"repositories,omitempty"`
-  Ssh SshConfig `yaml:"ssh,omitempty"`
-  State StateConfig `yaml:"state,omitempty"`
-  SystemDocker DockerConfig `yaml:"system_docker,omitempty"`
-  Upgrade UpgradeConfig `yaml:"upgrade,omitempty"`
-  Docker DockerConfig `yaml:"docker,omitempty"`
+  Console string `yaml:"console,omitempty"`
+  Environment map[string]string `yaml:"environment,omitempty"`
+  Services map[string]*composeConfig.ServiceConfigV1 `yaml:"services,omitempty"`
+  BootstrapContainers map[string]*composeConfig.ServiceConfigV1 `yaml:"bootstrap,omitempty"`
+  Autoformat map[string]*composeConfig.ServiceConfigV1 `yaml:"autoformat,omitempty"`
+  BootstrapDocker DockerConfig `yaml:"bootstrap_docker,omitempty"`
+  CloudInit CloudInit `yaml:"cloud_init,omitempty"`
+  Debug bool `yaml:"debug,omitempty"`
+  RmUsr bool `yaml:"rm_usr,omitempty"`
+  Log bool `yaml:"log,omitempty"`
+  ForceConsoleRebuild bool `yaml:"force_console_rebuild,omitempty"`
+  Disable []string `yaml:"disable,omitempty"`
+  ServicesInclude map[string]bool `yaml:"services_include,omitempty"`
+  Modules []string `yaml:"modules,omitempty"`
+  Network netconf.NetworkConfig `yaml:"network,omitempty"`
+  DefaultNetwork netconf.NetworkConfig `yaml:"default_network,omitempty"`
+  Repositories Repositories `yaml:"repositories,omitempty"`
+  Ssh SshConfig `yaml:"ssh,omitempty"`
+  State StateConfig `yaml:"state,omitempty"`
+  SystemDocker DockerConfig `yaml:"system_docker,omitempty"`
+  Upgrade UpgradeConfig `yaml:"upgrade,omitempty"`
+  Docker DockerConfig `yaml:"docker,omitempty"`
+  RegistryAuths map[string]types.AuthConfig `yaml:"registry_auths,omitempty"`
+  Defaults Defaults `yaml:"defaults,omitempty"`
+  ResizeDevice string `yaml:"resize_device,omitempty"`
 }
 
 type UpgradeConfig struct {
@@ -141,6 +154,11 @@ type CloudInit struct {
   Datasources []string `yaml:"datasources,omitempty"`
 }
 
+type Defaults struct {
+  Hostname string `yaml:"hostname,omitempty"`
+  Network netconf.NetworkConfig `yaml:"network,omitempty"`
+}
+
 func (r Repositories) ToArray() []string {
   result := make([]string, 0, len(r))
   for _, repo := range r {
diff --git a/docker/auth.go b/docker/auth.go
new file mode 100644
index 00000000..c4a086ad
--- /dev/null
+++ b/docker/auth.go
@@ -0,0 +1,82 @@
+package docker
+
+import (
+  "encoding/base64"
+  "fmt"
+  "strings"
+
+  log "github.com/Sirupsen/logrus"
+  "github.com/docker/docker/registry"
+  "github.com/docker/engine-api/types"
+  "github.com/docker/libcompose/docker"
+  "github.com/rancher/os/config"
+)
+
+// ConfigAuthLookup will lookup registry auth info from cloud config
+// if a context is set, it will also lookup auth info from the Docker config file
+type ConfigAuthLookup struct {
+  cfg *config.CloudConfig
+  context *docker.Context
+  dockerConfigAuthLookup *docker.ConfigAuthLookup
+}
+
+func NewConfigAuthLookup(cfg *config.CloudConfig) *ConfigAuthLookup {
+  return &ConfigAuthLookup{
+    cfg: cfg,
+  }
+}
+
+func populateRemaining(authConfig *types.AuthConfig) error {
+  if authConfig.Auth == "" {
+    return nil
+  }
+
+  decoded, err := base64.URLEncoding.DecodeString(authConfig.Auth)
+  if err != nil {
+    return err
+  }
+
+  decodedSplit := strings.Split(string(decoded), ":")
+  if len(decodedSplit) != 2 {
+    return fmt.Errorf("Invalid auth: %s", authConfig.Auth)
+  }
+
+  authConfig.Username = decodedSplit[0]
+  authConfig.Password = decodedSplit[1]
+
+  return nil
+}
+
+func (c *ConfigAuthLookup) SetConfig(cfg *config.CloudConfig) {
+  c.cfg = cfg
+}
+
+func (c *ConfigAuthLookup) SetContext(context *docker.Context) {
+  c.context = context
+  c.dockerConfigAuthLookup = docker.NewConfigAuthLookup(context)
+}
+
+func (c *ConfigAuthLookup) Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig {
+  if repoInfo == nil || repoInfo.Index == nil {
+    return types.AuthConfig{}
+  }
+  authConfig := registry.ResolveAuthConfig(c.All(), repoInfo.Index)
+
+  err := populateRemaining(&authConfig)
+  if err != nil {
+    log.Error(err)
+    return types.AuthConfig{}
+  }
+
+  return authConfig
+}
+
+func (c *ConfigAuthLookup) All() map[string]types.AuthConfig {
+  registryAuths := c.cfg.Rancher.RegistryAuths
+  if c.dockerConfigAuthLookup != nil {
+    for registry, authConfig := range c.dockerConfigAuthLookup.All() {
+      registryAuths[registry] = authConfig
+    }
+  }
+  return registryAuths
+}
"github.com/rancher/os/config" "github.com/rancher/os/util" ) type ClientFactory struct { - userClient *dockerclient.Client - systemClient *dockerclient.Client + userClient dockerclient.APIClient + systemClient dockerclient.APIClient userOnce sync.Once systemOnce sync.Once } -func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) { +func NewClientFactory(opts composeClient.Options) (project.ClientFactory, error) { userOpts := opts systemOpts := opts userOpts.Host = config.DOCKER_HOST systemOpts.Host = config.DOCKER_SYSTEM_HOST - userClient, err := docker.CreateClient(userOpts) + userClient, err := composeClient.Create(userOpts) if err != nil { return nil, err } - systemClient, err := docker.CreateClient(systemOpts) + systemClient, err := composeClient.Create(systemOpts) if err != nil { return nil, err } @@ -42,7 +44,7 @@ func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) { }, nil } -func (c *ClientFactory) Create(service project.Service) *dockerclient.Client { +func (c *ClientFactory) Create(service project.Service) dockerclient.APIClient { if IsSystemContainer(service.Config()) { waitFor(&c.systemOnce, c.systemClient, config.DOCKER_SYSTEM_HOST) return c.systemClient @@ -52,10 +54,10 @@ func (c *ClientFactory) Create(service project.Service) *dockerclient.Client { return c.userClient } -func waitFor(once *sync.Once, client *dockerclient.Client, endpoint string) { +func waitFor(once *sync.Once, client dockerclient.APIClient, endpoint string) { once.Do(func() { err := ClientOK(endpoint, func() bool { - _, err := client.Info() + _, err := client.Info(context.Background()) return err == nil }) if err != nil { diff --git a/docker/env.go b/docker/env.go index 907c167e..db8d8f3a 100644 --- a/docker/env.go +++ b/docker/env.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/docker/libcompose/project" + composeConfig "github.com/docker/libcompose/config" "github.com/rancher/os/config" ) @@ -72,7 +72,7 @@ func (c *ConfigEnvironment) SetConfig(cfg *config.CloudConfig) { c.cfg = cfg } -func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string { +func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *composeConfig.ServiceConfig) []string { fullKey := fmt.Sprintf("%s/%s", serviceName, key) return lookupKeys(c.cfg, fullKey, key) } diff --git a/docker/service.go b/docker/service.go index c9ffadf6..06bacbb9 100644 --- a/docker/service.go +++ b/docker/service.go @@ -1,11 +1,17 @@ package docker import ( + "fmt" + "github.com/Sirupsen/logrus" + dockerclient "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + composeConfig "github.com/docker/libcompose/config" "github.com/docker/libcompose/docker" "github.com/docker/libcompose/project" - dockerclient "github.com/fsouza/go-dockerclient" + "github.com/docker/libcompose/project/options" "github.com/rancher/os/config" + "golang.org/x/net/context" ) type Service struct { @@ -15,7 +21,7 @@ type Service struct { project *project.Project } -func NewService(factory *ServiceFactory, name string, serviceConfig *project.ServiceConfig, context *docker.Context, project *project.Project) *Service { +func NewService(factory *ServiceFactory, name string, serviceConfig *composeConfig.ServiceConfig, context *docker.Context, project *project.Project) *Service { return &Service{ Service: docker.NewService(name, serviceConfig, context), deps: factory.Deps, @@ -50,20 +56,20 @@ func (s *Service) missingImage() bool { return false } 
client := s.context.ClientFactory.Create(s) - i, err := client.InspectImage(s.Config().Image) - return err != nil || i == nil + _, _, err := client.ImageInspectWithRaw(context.Background(), s.Config().Image, false) + return err != nil } func (s *Service) requiresSyslog() bool { - return s.Config().LogDriver == "syslog" + return s.Config().Logging.Driver == "syslog" } func (s *Service) requiresUserDocker() bool { - return s.Config().Labels.MapParts()[config.SCOPE] != config.SYSTEM + return s.Config().Labels[config.SCOPE] != config.SYSTEM } func appendLink(deps []project.ServiceRelationship, name string, optional bool, p *project.Project) []project.ServiceRelationship { - if _, ok := p.Configs[name]; !ok { + if _, ok := p.ServiceConfigs.Get(name); !ok { return deps } rel := project.NewServiceRelationship(name, project.RelTypeLink) @@ -71,29 +77,26 @@ func appendLink(deps []project.ServiceRelationship, name string, optional bool, return append(deps, rel) } -func (s *Service) shouldRebuild() (bool, error) { - containers, err := s.Containers() - if err != nil { - return false, err - } - cfg, err := config.LoadConfig() +func (s *Service) shouldRebuild(ctx context.Context) (bool, error) { + containers, err := s.Containers(ctx) if err != nil { return false, err } + cfg := config.LoadConfig() for _, c := range containers { - outOfSync, err := c.(*docker.Container).OutOfSync(s.Service.Config().Image) + outOfSync, err := c.(*docker.Container).OutOfSync(ctx, s.Service.Config().Image) if err != nil { return false, err } - _, containerInfo, err := s.getContainer() - if containerInfo == nil || err != nil { + _, containerInfo, err := s.getContainer(ctx) + if err != nil { return false, err } name := containerInfo.Name[1:] origRebuildLabel := containerInfo.Config.Labels[config.REBUILD] - newRebuildLabel := s.Config().Labels.MapParts()[config.REBUILD] + newRebuildLabel := s.Config().Labels[config.REBUILD] rebuildLabelChanged := newRebuildLabel != origRebuildLabel logrus.WithFields(logrus.Fields{ "origRebuildLabel": origRebuildLabel, @@ -101,59 +104,66 @@ func (s *Service) shouldRebuild() (bool, error) { "rebuildLabelChanged": rebuildLabelChanged, "outOfSync": outOfSync}).Debug("Rebuild values") - rebuilding := false + if newRebuildLabel == "always" { + return true, nil + } if outOfSync { - if cfg.Rancher.ForceConsoleRebuild && s.Name() == "console" { - cfg.Rancher.ForceConsoleRebuild = false - if err := cfg.Save(); err != nil { - return false, err + if s.Name() == "console" { + if cfg.Rancher.ForceConsoleRebuild { + if err := config.Set("rancher.force_console_rebuild", false); err != nil { + return false, err + } + return true, nil } - rebuilding = true - } else if origRebuildLabel == "always" || rebuildLabelChanged || origRebuildLabel != "false" { - rebuilding = true + origConsoleLabel := containerInfo.Config.Labels[config.CONSOLE] + newConsoleLabel := s.Config().Labels[config.CONSOLE] + if newConsoleLabel != origConsoleLabel { + return true, nil + } + } else if rebuildLabelChanged || origRebuildLabel != "false" { + return true, nil } else { logrus.Warnf("%s needs rebuilding", name) } } - - if rebuilding { - logrus.Infof("Rebuilding %s", name) - return true, nil - } } return false, nil } -func (s *Service) Up() error { - labels := s.Config().Labels.MapParts() +func (s *Service) Up(ctx context.Context, options options.Up) error { + labels := s.Config().Labels - if err := s.Service.Create(); err != nil { + if err := s.Service.Create(ctx, options.Create); err != nil { return err } - shouldRebuild, err := 
s.shouldRebuild() + + shouldRebuild, err := s.shouldRebuild(ctx) if err != nil { return err } if shouldRebuild { - cs, err := s.Service.Containers() + logrus.Infof("Rebuilding %s", s.Name()) + cs, err := s.Service.Containers(ctx) if err != nil { return err } for _, c := range cs { - if _, err := c.(*docker.Container).Recreate(s.Config().Image); err != nil { + if _, err := c.(*docker.Container).Recreate(ctx, s.Config().Image); err != nil { return err } } - s.rename() + if err = s.rename(ctx); err != nil { + return err + } } if labels[config.CREATE_ONLY] == "true" { return s.checkReload(labels) } - if err := s.Service.Up(); err != nil { + if err := s.Service.Up(ctx, options); err != nil { return err } if labels[config.DETACH] == "false" { - if err := s.wait(); err != nil { + if err := s.wait(ctx); err != nil { return err } } @@ -168,52 +178,53 @@ func (s *Service) checkReload(labels map[string]string) error { return nil } -func (s *Service) Create() error { - return s.Service.Create() +func (s *Service) Create(ctx context.Context, options options.Create) error { + return s.Service.Create(ctx, options) } -func (s *Service) getContainer() (*dockerclient.Client, *dockerclient.Container, error) { - containers, err := s.Service.Containers() +func (s *Service) getContainer(ctx context.Context) (dockerclient.APIClient, types.ContainerJSON, error) { + containers, err := s.Service.Containers(ctx) + if err != nil { - return nil, nil, err + return nil, types.ContainerJSON{}, err } if len(containers) == 0 { - return nil, nil, nil + return nil, types.ContainerJSON{}, fmt.Errorf("No containers found for %s", s.Name()) } id, err := containers[0].ID() if err != nil { - return nil, nil, err + return nil, types.ContainerJSON{}, err } client := s.context.ClientFactory.Create(s) - info, err := client.InspectContainer(id) + info, err := client.ContainerInspect(context.Background(), id) return client, info, err } -func (s *Service) wait() error { - client, info, err := s.getContainer() - if err != nil || info == nil { +func (s *Service) wait(ctx context.Context) error { + client, info, err := s.getContainer(ctx) + if err != nil { return err } - if _, err := client.WaitContainer(info.ID); err != nil { + if _, err := client.ContainerWait(context.Background(), info.ID); err != nil { return err } return nil } -func (s *Service) rename() error { - client, info, err := s.getContainer() - if err != nil || info == nil { +func (s *Service) rename(ctx context.Context) error { + client, info, err := s.getContainer(ctx) + if err != nil { return err } if len(info.Name) > 0 && info.Name[1:] != s.Name() { logrus.Debugf("Renaming container %s => %s", info.Name[1:], s.Name()) - return client.RenameContainer(dockerclient.RenameContainerOptions{ID: info.ID, Name: s.Name()}) + return client.ContainerRename(context.Background(), info.ID, s.Name()) } else { return nil } diff --git a/docker/service_factory.go b/docker/service_factory.go index 5835929c..6126aa0d 100644 --- a/docker/service_factory.go +++ b/docker/service_factory.go @@ -1,6 +1,7 @@ package docker import ( + composeConfig "github.com/docker/libcompose/config" "github.com/docker/libcompose/docker" "github.com/docker/libcompose/project" "github.com/rancher/os/util" @@ -11,13 +12,13 @@ type ServiceFactory struct { Deps map[string][]string } -func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) { - if after := serviceConfig.Labels.MapParts()["io.rancher.os.after"]; after != "" { +func (s 
*ServiceFactory) Create(project *project.Project, name string, serviceConfig *composeConfig.ServiceConfig) (project.Service, error) { + if after := serviceConfig.Labels["io.rancher.os.after"]; after != "" { for _, dep := range util.TrimSplit(after, ",") { s.Deps[name] = append(s.Deps[name], dep) } } - if before := serviceConfig.Labels.MapParts()["io.rancher.os.before"]; before != "" { + if before := serviceConfig.Labels["io.rancher.os.before"]; before != "" { for _, dep := range util.TrimSplit(before, ",") { s.Deps[dep] = append(s.Deps[dep], name) } diff --git a/docker/util.go b/docker/util.go index 52ebdf3b..71622281 100644 --- a/docker/util.go +++ b/docker/util.go @@ -1,11 +1,11 @@ package docker import ( - "github.com/docker/libcompose/project" + composeConfig "github.com/docker/libcompose/config" "github.com/rancher/os/config" ) -func IsSystemContainer(serviceConfig *project.ServiceConfig) bool { - return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM +func IsSystemContainer(serviceConfig *composeConfig.ServiceConfig) bool { + return serviceConfig.Labels[config.SCOPE] == config.SYSTEM } diff --git a/docs/rancheros.png b/docs/rancheros.png index c29a4779..4fed17d0 100644 Binary files a/docs/rancheros.png and b/docs/rancheros.png differ diff --git a/hostname/hostname.go b/hostname/hostname.go index 5fd16f0f..f7d0b299 100644 --- a/hostname/hostname.go +++ b/hostname/hostname.go @@ -2,17 +2,18 @@ package hostname import ( "bufio" - "github.com/rancher/os/config" "io/ioutil" "os" "strings" "syscall" + + "github.com/rancher/os/config" ) func SetHostnameFromCloudConfig(cc *config.CloudConfig) error { var hostname string if cc.Hostname == "" { - hostname = cc.DefaultHostname + hostname = cc.Rancher.Defaults.Hostname } else { hostname = cc.Hostname } diff --git a/images/00-rootfs/.dockerignore b/images/00-rootfs/.dockerignore new file mode 100644 index 00000000..3c5f1711 --- /dev/null +++ b/images/00-rootfs/.dockerignore @@ -0,0 +1,2 @@ +assets +build/dist/kernel diff --git a/images/00-rootfs/Dockerfile b/images/00-rootfs/Dockerfile new file mode 100644 index 00000000..242b0876 --- /dev/null +++ b/images/00-rootfs/Dockerfile @@ -0,0 +1,2 @@ +FROM scratch +ADD build/rootfs.tar / diff --git a/images/01-base/Dockerfile b/images/01-base/Dockerfile new file mode 100644 index 00000000..f0e37f76 --- /dev/null +++ b/images/01-base/Dockerfile @@ -0,0 +1,38 @@ +FROM rancher/os-rootfs +RUN ln -s /dev/null /etc/udev/rules.d/80-net-name-slot.rules +# Cleanup Buildroot +RUN rm /sbin/poweroff /sbin/reboot /sbin/halt && \ + sed -i '/^root/s!/bin/sh!/bin/bash!' 
/etc/passwd && \ + echo 'RancherOS \n \l' > /etc/issue && \ + rm -rf /run \ + /linuxrc \ + /etc/os-release \ + /var/cache \ + /var/lock \ + /var/log \ + /var/run \ + /var/spool \ + /var/lib/misc && \ + mkdir -p \ + /home \ + /run \ + /var/cache \ + /var/lock \ + /var/log \ + /var/run \ + /var/spool && \ + passwd -l root && \ + addgroup -g 1100 rancher && \ + addgroup -g 1101 docker && \ + addgroup -g 1103 sudo && \ + adduser -u 1100 -G rancher -D -h /home/rancher -s /bin/bash rancher && \ + adduser -u 1101 -G docker -D -h /home/docker -s /bin/bash docker && \ + adduser rancher docker && \ + adduser rancher sudo && \ + adduser docker sudo && \ + echo '%sudo ALL=(ALL) ALL' >> /etc/sudoers +COPY inputrc /etc/inputrc +COPY entry.sh /usr/sbin/entry.sh +COPY growpart /usr/bin/growpart + +ENTRYPOINT ["/usr/sbin/entry.sh"] diff --git a/images/01-base/entry.sh b/images/01-base/entry.sh new file mode 100755 index 00000000..ca1f5198 --- /dev/null +++ b/images/01-base/entry.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -e /host/dev ]; then + mount --rbind /host/dev /dev +fi + +CA_BASE=/etc/ssl/certs/ca-certificates.crt.rancher +CA=/etc/ssl/certs/ca-certificates.crt + +if [[ -e ${CA_BASE} && ! -e ${CA} ]]; then + cp $CA_BASE $CA +fi + +exec "$@" diff --git a/images/01-base/growpart b/images/01-base/growpart new file mode 100755 index 00000000..d69bcf9f --- /dev/null +++ b/images/01-base/growpart @@ -0,0 +1,780 @@ +#!/bin/sh +# Copyright (C) 2011 Canonical Ltd. +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Authors: Scott Moser <smoser@canonical.com> +# Juerg Haefliger <juerg.haefliger@hp.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# the fudge factor. if within this many bytes don't bother +FUDGE=${GROWPART_FUDGE:-$((1024*1024))} +TEMP_D="" +RESTORE_FUNC="" +RESTORE_HUMAN="" +VERBOSITY=0 +DISK="" +PART="" +PT_UPDATE=false +DRY_RUN=0 + +SFDISK_VERSION="" +SFDISK_2_26="22600" +SFDISK_V_WORKING_GPT="22603" +MBR_BACKUP="" +GPT_BACKUP="" +_capture="" + +error() { + echo "$@" 1>&2 +} + +fail() { + [ $# -eq 0 ] || echo "FAILED:" "$@" + exit 2 +} + +nochange() { + echo "NOCHANGE:" "$@" + exit 1 +} + +changed() { + echo "CHANGED:" "$@" + exit 0 +} + +change() { + echo "CHANGE:" "$@" + exit 0 +} + +cleanup() { + if [ -n "${RESTORE_FUNC}" ]; then + error "***** WARNING: Resize failed, attempting to revert ******" + if ${RESTORE_FUNC} ; then + error "***** Appears to have gone OK ****" + else + error "***** FAILED! ******" + if [ -n "${RESTORE_HUMAN}" -a -f "${RESTORE_HUMAN}" ]; then + error "**** original table looked like: ****" + cat "${RESTORE_HUMAN}" 1>&2 + else + error "We seem to have not saved the partition table!" + fi + fi + fi + [ -z "${TEMP_D}" -o !
-d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" +} + +debug() { + local level=${1} + shift + [ "${level}" -gt "${VERBOSITY}" ] && return + if [ "${DEBUG_LOG}" ]; then + echo "$@" >>"${DEBUG_LOG}" + else + error "$@" + fi +} + +debugcat() { + local level="$1" + shift; + [ "${level}" -gt "$VERBOSITY" ] && return + if [ "${DEBUG_LOG}" ]; then + cat "$@" >>"${DEBUG_LOG}" + else + cat "$@" 1>&2 + fi +} + +mktemp_d() { + # just a mktemp -d that doens't need mktemp if its not there. + _RET=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX" 2>/dev/null) && + return + _RET=$(umask 077 && t="${TMPDIR:-/tmp}/${0##*/}.$$" && + mkdir "${t}" && echo "${t}") + return +} + +Usage() { + cat <&2 + error "$@" + exit 2 +} + +sfdisk_restore_legacy() { + sfdisk --no-reread "${DISK}" -I "${MBR_BACKUP}" +} + +sfdisk_restore() { + # files are named: sfdisk--.bak + local f="" offset="" fails=0 + for f in "${MBR_BACKUP}"*.bak; do + [ -f "$f" ] || continue + offset=${f##*-} + offset=${offset%.bak} + [ "$offset" = "$f" ] && { + error "WARN: confused by file $f"; + continue; + } + dd "if=$f" "of=${DISK}" seek=$(($offset)) bs=1 conv=notrunc || + { error "WARN: failed restore from $f"; fails=$(($fails+1)); } + done + return $fails +} + +sfdisk_worked_but_blkrrpart_failed() { + local ret="$1" output="$2" + # exit code found was just 1, but dont insist on that + #[ $ret -eq 1 ] || return 1 + # Successfully wrote the new partition table + grep -qi "Success.* wrote.* new.* partition" "$output" && + grep -qi "BLKRRPART: Device or resource busy" "$output" + return +} + +get_sfdisk_version() { + # set SFDISK_VERSION to MAJOR*10000+MINOR*100+MICRO + local out oifs="$IFS" ver="" + [ -n "$SFDISK_VERSION" ] && return 0 + # expected output: sfdisk from util-linux 2.25.2 + out=$(sfdisk --version) || + { error "failed to get sfdisk version"; return 1; } + set -- $out + ver=$4 + case "$ver" in + [0-9]*.[0-9]*.[0-9]|[0-9].[0-9]*) + IFS="."; set -- $ver; IFS="$oifs" + SFDISK_VERSION=$(($1*10000+$2*100+${3:-0})) + return 0;; + *) error "unexpected output in sfdisk --version [$out]" + return 1;; + esac +} + +resize_sfdisk() { + local humanpt="${TEMP_D}/recovery" + local mbr_backup="${TEMP_D}/orig.save" + local restore_func="" + local format="$1" + + local change_out=${TEMP_D}/change.out + local dump_out=${TEMP_D}/dump.out + local new_out=${TEMP_D}/new.out + local dump_mod=${TEMP_D}/dump.mod + local tmp="${TEMP_D}/tmp.out" + local err="${TEMP_D}/err.out" + local mbr_max_512="4294967296" + + local pt_start pt_size pt_end max_end new_size change_info dpart + local sector_num sector_size disk_size tot out + + rqe sfd_list sfdisk --list --unit=S "$DISK" >"$tmp" || + fail "failed: sfdisk --list $DISK" + if [ "${SFDISK_VERSION}" -lt ${SFDISK_2_26} ]; then + # exected output contains: Units: sectors of 512 bytes, ... 
+ out=$(awk '$1 == "Units:" && $5 ~ /bytes/ { print $4 }' "$tmp") || + fail "failed to read sfdisk output" + if [ -z "$out" ]; then + error "WARN: sector size not found in sfdisk output, assuming 512" + sector_size=512 + else + sector_size="$out" + fi + local _w _cyl _w1 _heads _w2 sectors _w3 t s + # show-size is in units of 1024 bytes (same as /proc/partitions) + t=$(sfdisk --show-size "${DISK}") || + fail "failed: sfdisk --show-size $DISK" + disk_size=$((t*1024)) + sector_num=$(($disk_size/$sector_size)) + msg="disk size '$disk_size' not evenly div by sector size '$sector_size'" + [ "$((${disk_size}%${sector_size}))" -eq 0 ] || + error "WARN: $msg" + restore_func=sfdisk_restore_legacy + else + # --list first line output: + # Disk /dev/vda: 20 GiB, 21474836480 bytes, 41943040 sectors + local _x + read _x _x _x _x disk_size _x sector_num _x < "$tmp" + sector_size=$((disk_size/$sector_num)) + restore_func=sfdisk_restore + fi + + debug 1 "$sector_num sectors of $sector_size. total size=${disk_size} bytes" + [ $(($disk_size/512)) -gt $mbr_max_512 ] && + debug 1 "WARN: disk is larger than 2TB. additional space will go unused." + + rqe sfd_dump sfdisk --unit=S --dump "${DISK}" >"${dump_out}" || + fail "failed to dump sfdisk info for ${DISK}" + RESTORE_HUMAN="$dump_out" + + { + echo "## sfdisk --unit=S --dump ${DISK}" + cat "${dump_out}" + } >"$humanpt" + + [ $? -eq 0 ] || fail "failed to save sfdisk -d output" + RESTORE_HUMAN="$humanpt" + + debugcat 1 "$humanpt" + + sed -e 's/,//g; s/start=/start /; s/size=/size /' "${dump_out}" \ + >"${dump_mod}" || + fail "sed failed on dump output" + + dpart="${DISK}${PART}" # disk and partition number + if [ -b "${DISK}p${PART}" -a "${DISK%[0-9]}" != "${DISK}" ]; then + # for block devices that end in a number (/dev/nbd0) + # the partition is "p<partition_number>" (/dev/nbd0p1) + dpart="${DISK}p${PART}" + elif [ "${DISK#/dev/loop[0-9]}" != "${DISK}" ]; then + # for /dev/loop devices, sfdisk output will be <disk>p<partition_number> + # format also, even though there is not a device there. + dpart="${DISK}p${PART}" + fi + + pt_start=$(awk '$1 == pt { print $4 }' "pt=${dpart}" <"${dump_mod}") && + pt_size=$(awk '$1 == pt { print $6 }' "pt=${dpart}" <"${dump_mod}") && + [ -n "${pt_start}" -a -n "${pt_size}" ] && + pt_end=$((${pt_size}+${pt_start})) || + fail "failed to get start and end for ${dpart} in ${DISK}" + + # find the minimal starting location that is >= pt_end + max_end=$(awk '$3 == "start" { if($4 >= pt_end && $4 < min) + { min = $4 } } END { printf("%s\n",min); }' \ + min=${sector_num} pt_end=${pt_end} "${dump_mod}") && + [ -n "${max_end}" ] || + fail "failed to get max_end for partition ${PART}" + + mbr_max_sectors=$((mbr_max_512*$((sector_size/512)))) + if [ "$max_end" -gt "$mbr_max_sectors" ]; then + max_end=$mbr_max_sectors + fi + + if [ "$format" = "gpt" ]; then + # sfdisk respects 'last-lba' in input, and complains about + # partitions that go past that. without it, it does the right thing. + sed -i '/^last-lba:/d' "$dump_out" || + fail "failed to remove last-lba from output" + fi + + local gpt_second_size="33" + if [ "${max_end}" -gt "$((${sector_num}-${gpt_second_size}))" ]; then + # if mbr allow subsequent conversion to gpt without shrinking the + # partition. safety net at cost of 33 sectors, seems reasonable. + # if gpt, we can't write there anyway.
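Aside: the growth checks just below implement growpart's fudge rule: the partition is grown only when at least FUDGE bytes (1 MiB by default) would be gained, with all positions expressed in sectors. A Go rendering of that arithmetic, under illustrative names (canGrow):

package main

import "fmt"

// canGrow applies the fudge rule: growing is worthwhile only if the new
// end is at least fudgeBytes (converted to sectors) past the current end.
func canGrow(ptEnd, maxEnd, sectorSize, fudgeBytes int64) bool {
	return ptEnd+fudgeBytes/sectorSize <= maxEnd
}

func main() {
	const sector = 512
	fmt.Println(canGrow(20480, 20500, sector, 1<<20))   // false: only ~10 KiB to gain
	fmt.Println(canGrow(20480, 4194304, sector, 1<<20)) // true: ~2 GiB to gain
}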
+ debug 1 "padding ${gpt_second_size} sectors for gpt secondary header" + max_end=$((${sector_num}-${gpt_second_size})) + fi + + debug 1 "max_end=${max_end} tot=${sector_num} pt_end=${pt_end}" \ + "pt_start=${pt_start} pt_size=${pt_size}" + [ $((${pt_end})) -eq ${max_end} ] && + nochange "partition ${PART} is size ${pt_size}. it cannot be grown" + [ $((${pt_end}+(${FUDGE}/$sector_size))) -gt ${max_end} ] && + nochange "partition ${PART} could only be grown by" \ + "$((${max_end}-${pt_end})) [fudge=$((${FUDGE}/$sector_size))]" + + # now, change the size for this partition in ${dump_out} to be the + # new size + new_size=$((${max_end}-${pt_start})) + sed "\|^\s*${dpart} |s/${pt_size},/${new_size},/" "${dump_out}" \ + >"${new_out}" || + fail "failed to change size in output" + + change_info="partition=${PART} start=${pt_start} old: size=${pt_size} end=${pt_end} new: size=${new_size},end=${max_end}" + if [ ${DRY_RUN} -ne 0 ]; then + echo "CHANGE: ${change_info}" + { + echo "# === old sfdisk -d ===" + cat "${dump_out}" + echo "# === new sfdisk -d ===" + cat "${new_out}" + } 1>&2 + exit 0 + fi + + MBR_BACKUP="${mbr_backup}" + LANG=C sfdisk --no-reread "${DISK}" --force \ + -O "${mbr_backup}" <"${new_out}" >"${change_out}" 2>&1 + ret=$? + [ $ret -eq 0 ] || RESTORE_FUNC="${restore_func}" + + if [ $ret -eq 0 ]; then + : + elif $PT_UPDATE && + sfdisk_worked_but_blkrrpart_failed "$ret" "${change_out}"; then + # if the command failed, but it looks like only because + # the device was busy and we have pt_update, then go on + debug 1 "sfdisk failed, but likely only because of blkrrpart" + else + error "attempt to resize ${DISK} failed. sfdisk output below:" + sed 's,^,| ,' "${change_out}" 1>&2 + fail "failed to resize" + fi + + rq pt_update pt_update "$DISK" "$PART" || + fail "pt_resize failed" + + RESTORE_FUNC="" + + changed "${change_info}" + + # dump_out looks something like: + ## partition table of /tmp/out.img + #unit: sectors + # + #/tmp/out.img1 : start= 1, size= 48194, Id=83 + #/tmp/out.img2 : start= 48195, size= 963900, Id=83 + #/tmp/out.img3 : start= 1012095, size= 305235, Id=82 + #/tmp/out.img4 : start= 1317330, size= 771120, Id= 5 + #/tmp/out.img5 : start= 1317331, size= 642599, Id=83 + #/tmp/out.img6 : start= 1959931, size= 48194, Id=83 + #/tmp/out.img7 : start= 2008126, size= 80324, Id=83 +} + +gpt_restore() { + sgdisk -l "${GPT_BACKUP}" "${DISK}" +} + +resize_sgdisk() { + GPT_BACKUP="${TEMP_D}/pt.backup" + + local pt_info="${TEMP_D}/pt.info" + local pt_pretend="${TEMP_D}/pt.pretend" + local pt_data="${TEMP_D}/pt.data" + local out="${TEMP_D}/out" + + local dev="disk=${DISK} partition=${PART}" + + local pt_start pt_end pt_size last pt_max code guid name new_size + local old new change_info sector_size + + # Dump the original partition information and details to disk. This is + # used in case something goes wrong and human interaction is required + # to revert any changes. + rqe sgd_info sgdisk "--info=${PART}" --print "${DISK}" >"${pt_info}" || + fail "${dev}: failed to dump original sgdisk info" + RESTORE_HUMAN="${pt_info}" + + sector_size=$(awk '$0 ~ /^Logical sector size:.*bytes/ { print $4 }' \ + "$pt_info") && [ -n "$sector_size" ] || { + sector_size=512 + error "WARN: did not find sector size, assuming 512" + } + + debug 1 "$dev: original sgdisk info:" + debugcat 1 "${pt_info}" + + # Pretend to move the backup GPT header to the end of the disk and dump + # the resulting partition information. We use this info to determine if + # we have to resize the partition. 
+ rqe sgd_pretend sgdisk --pretend --move-second-header \ + --print "${DISK}" >"${pt_pretend}" || + fail "${dev}: failed to dump pretend sgdisk info" + + debug 1 "$dev: pretend sgdisk info" + debugcat 1 "${pt_pretend}" + + # Extract the partition data from the pretend dump + awk 'found { print } ; $1 == "Number" { found = 1 }' \ + "${pt_pretend}" >"${pt_data}" || + fail "${dev}: failed to parse pretend sgdisk info" + + # Get the start and end sectors of the partition to be grown + pt_start=$(awk '$1 == '"${PART}"' { print $2 }' "${pt_data}") && + [ -n "${pt_start}" ] || + fail "${dev}: failed to get start sector" + pt_end=$(awk '$1 == '"${PART}"' { print $3 }' "${pt_data}") && + [ -n "${pt_end}" ] || + fail "${dev}: failed to get end sector" + pt_size="$((${pt_end} - ${pt_start}))" + + # Get the last usable sector + last=$(awk '/last usable sector is/ { print $NF }' \ + "${pt_pretend}") && [ -n "${last}" ] || + fail "${dev}: failed to get last usable sector" + + # Find the minimal start sector that is >= pt_end + pt_max=$(awk '{ if ($2 >= pt_end && $2 < min) { min = $2 } } END \ + { print min }' min="${last}" pt_end="${pt_end}" \ + "${pt_data}") && [ -n "${pt_max}" ] || + fail "${dev}: failed to find max end sector" + + debug 1 "${dev}: pt_start=${pt_start} pt_end=${pt_end}" \ + "pt_size=${pt_size} pt_max=${pt_max} last=${last}" + + # Check if the partition can be grown + [ "${pt_end}" -eq "${pt_max}" ] && + nochange "${dev}: size=${pt_size}, it cannot be grown" + [ "$((${pt_end} + ${FUDGE}/${sector_size}))" -gt "${pt_max}" ] && + nochange "${dev}: could only be grown by" \ + "$((${pt_max} - ${pt_end})) [fudge=$((${FUDGE}/$sector_size))]" + + # The partition can be grown if we made it here. Get some more info + # about it so we can do it properly. + # FIXME: Do we care about the attribute flags? + code=$(awk '/^Partition GUID code:/ { print $4 }' "${pt_info}") + guid=$(awk '/^Partition unique GUID:/ { print $4 }' "${pt_info}") + name=$(awk '/^Partition name:/ { gsub(/'"'"'/, "") ; \ + if (NF >= 3) print substr($0, index($0, $3)) }' "${pt_info}") + [ -n "${code}" -a -n "${guid}" ] || + fail "${dev}: failed to parse sgdisk details" + + debug 1 "${dev}: code=${code} guid=${guid} name='${name}'" + local wouldrun="" + [ "$DRY_RUN" -ne 0 ] && wouldrun="would-run" + + # Calculate the new size of the partition + new_size=$((${pt_max} - ${pt_start})) + old="old: size=${pt_size},end=${pt_end}" + new="new: size=${new_size},end=${pt_max}" + change_info="${dev}: start=${pt_start} ${old} ${new}" + + # Backup the current partition table, we're about to modify it + rq sgd_backup $wouldrun sgdisk "--backup=${GPT_BACKUP}" "${DISK}" || + fail "${dev}: failed to backup the partition table" + + # Modify the partition table. 
We do it all in one go (the order is + # important!): + # - move the GPT backup header to the end of the disk + # - delete the partition + # - recreate the partition with the new size + # - set the partition code + # - set the partition GUID + # - set the partition name + rq sgdisk_mod $wouldrun sgdisk --move-second-header "--delete=${PART}" \ + "--new=${PART}:${pt_start}:${pt_max}" \ + "--typecode=${PART}:${code}" \ + "--partition-guid=${PART}:${guid}" \ + "--change-name=${PART}:${name}" "${DISK}" && + rq pt_update $wouldrun pt_update "$DISK" "$PART" || { + RESTORE_FUNC=gpt_restore + fail "${dev}: failed to repartition" + } + + # Dry run + [ "${DRY_RUN}" -ne 0 ] && change "${change_info}" + + changed "${change_info}" +} + +kver_to_num() { + local kver="$1" maj="" min="" mic="0" + kver=${kver%%-*} + maj=${kver%%.*} + min=${kver#${maj}.} + min=${min%%.*} + mic=${kver#${maj}.${min}.} + [ "$kver" = "$mic" ] && mic=0 + _RET=$(($maj*1000*1000+$min*1000+$mic)) +} + +kver_cmp() { + local op="$2" n1="" n2="" + kver_to_num "$1" + n1="$_RET" + kver_to_num "$3" + n2="$_RET" + [ $n1 $op $n2 ] +} + +rq() { + # runquieterror(label, command) + # gobble stderr of a command unless it errors + local label="$1" ret="" efile="" + efile="$TEMP_D/$label.err" + shift; + + local rlabel="running" + [ "$1" = "would-run" ] && rlabel="would-run" && shift + + local cmd="" x="" + for x in "$@"; do + [ "${x#* }" != "$x" -o "${x#* \"}" != "$x" ] && x="'$x'" + cmd="$cmd $x" + done + cmd=${cmd# } + + debug 2 "$rlabel[$label][$_capture]" "$cmd" + [ "$rlabel" = "would-run" ] && return 0 + + if [ "${_capture}" = "erronly" ]; then + "$@" 2>"$TEMP_D/$label.err" + ret=$? + else + "$@" >"$TEMP_D/$label.err" 2>&1 + ret=$? + fi + if [ $ret -ne 0 ]; then + error "failed [$label:$ret]" "$@" + cat "$efile" 1>&2 + fi + return $ret +} + +rqe() { + local _capture="erronly" + rq "$@" +} + +verify_ptupdate() { + local input="$1" found="" reason="" kver="" + + # we can always satisfy 'off' + if [ "$input" = "off" ]; then + _RET="false"; + return 0; + fi + + if command -v partx >/dev/null 2>&1; then + local out="" ret=0 + out=$(partx --help 2>&1) + ret=$? + if [ $ret -eq 0 ]; then + echo "$out" | grep -q -- --update || { + reason="partx has no '--update' flag in usage." + found="off" + } + else + reason="'partx --help' returned $ret. assuming it is old." + found="off" + fi + else + reason="no 'partx' command" + found="off" + fi + + if [ -z "$found" ]; then + if [ "$(uname)" != "Linux" ]; then + reason="Kernel is not Linux per uname." + found="off" + fi + fi + + if [ -z "$found" ]; then + kver=$(uname -r) || debug 1 "uname -r failed!" + + if ! kver_cmp "${kver-0.0.0}" -ge 3.8.0; then + reason="Kernel '$kver' < 3.8.0." + found="off" + fi + fi + + if [ -z "$found" ]; then + _RET="true" + return 0 + fi + + case "$input" in + on) error "$reason"; return 1;; + auto) + _RET="false"; + debug 1 "partition update disabled: $reason" + return 0;; + force) + _RET="true" + error "WARNING: ptupdate forced on even though: $reason" + return 0;; + esac + error "unknown input '$input'"; + return 1; +} + +pt_update() { + local dev="$1" part="$2" update="${3:-$PT_UPDATE}" + if ! 
$update; then + return 0 + fi + # partx only works on block devices (do not run on file) + [ -b "$dev" ] || return 0 + partx --update "$part" "$dev" +} + +has_cmd() { + command -v "${1}" >/dev/null 2>&1 +} + +resize_sgdisk_gpt() { + resize_sgdisk gpt +} + +resize_sgdisk_dos() { + fail "unable to resize dos label with sgdisk" +} + +resize_sfdisk_gpt() { + resize_sfdisk gpt +} + +resize_sfdisk_dos() { + resize_sfdisk dos +} + +get_table_format() { + local out="" disk="$1" + if has_cmd blkid && out=$(blkid -o value -s PTTYPE "$disk") && + [ "$out" = "dos" -o "$out" = "gpt" ]; then + _RET="$out" + return + fi + _RET="dos" + if [ ${SFDISK_VERSION} -lt ${SFDISK_2_26} ] && + out=$(sfdisk --id --force "$disk" 1 2>/dev/null); then + if [ "$out" = "ee" ]; then + _RET="gpt" + else + _RET="dos" + fi + return + elif out=$(LANG=C sfdisk --list "$disk"); then + out=$(echo "$out" | sed -e '/Disklabel type/!d' -e 's/.*: //') + case "$out" in + gpt|dos) _RET="$out";; + *) error "WARN: unknown label $out";; + esac + fi +} + +get_resizer() { + local format="$1" user=${2:-"auto"} + + case "$user" in + sgdisk) _RET="resize_sgdisk_$format"; return;; + sfdisk) _RET="resize_sfdisk_$format"; return;; + auto) :;; + *) error "unexpected input: '$user'";; + esac + + if [ "$format" = "dos" ]; then + _RET="resize_sfdisk_dos" + return 0 + fi + + if [ "${SFDISK_VERSION}" -ge ${SFDISK_V_WORKING_GPT} ]; then + # sfdisk 2.26.2 works for resize but loses type (LP: #1474090) + _RET="resize_sfdisk_gpt" + elif has_cmd sgdisk; then + _RET="resize_sgdisk_$format" + else + error "no tools available to resize disk with '$format'" + return 1 + fi + return 0 +} + +pt_update="auto" +resizer=${GROWPART_RESIZER:-"auto"} +while [ $# -ne 0 ]; do + cur=${1} + next=${2} + case "$cur" in + -h|--help) + Usage + exit 0 + ;; + --fudge) + FUDGE=${next} + shift + ;; + -N|--dry-run) + DRY_RUN=1 + ;; + -u|--update|--update=*) + if [ "${cur#--update=}" != "$cur" ]; then + next="${cur#--update=}" + else + shift + fi + case "$next" in + off|auto|force|on) pt_update=$next;; + *) fail "unknown --update option: $next";; + esac + ;; + -v|--verbose) + VERBOSITY=$(($VERBOSITY+1)) + ;; + --) + shift + break + ;; + -*) + fail "unknown option ${cur}" + ;; + *) + if [ -z "${DISK}" ]; then + DISK=${cur} + else + [ -z "${PART}" ] || fail "confused by arg ${cur}" + PART=${cur} + fi + ;; + esac + shift +done + +[ -n "${DISK}" ] || bad_Usage "must supply disk and partition-number" +[ -n "${PART}" ] || bad_Usage "must supply partition-number" + +has_cmd "sfdisk" || fail "sfdisk not found" +get_sfdisk_version || fail + +[ -e "${DISK}" ] || fail "${DISK}: does not exist" + +[ "${PART#*[!0-9]}" = "${PART}" ] || fail "partition-number must be a number" + +verify_ptupdate "$pt_update" || fail +PT_UPDATE=$_RET + +debug 1 "update-partition set to $PT_UPDATE" + +mktemp_d && TEMP_D="${_RET}" || fail "failed to make temp dir" +trap cleanup 0 # EXIT - some shells may not like 'EXIT' but are ok with 0 + +# get the ID of the first partition to determine if it's MBR or GPT +get_table_format "$DISK" || fail +format=$_RET +get_resizer "$format" "$resizer" || + fail "failed to get a resizer for id '$id'" +resizer=$_RET + +debug 1 "resizing $PART on $DISK using $resizer" +"$resizer" + +# vi: ts=4 noexpandtab diff --git a/images/01-base/inputrc b/images/01-base/inputrc new file mode 100644 index 00000000..d3da9859 --- /dev/null +++ b/images/01-base/inputrc @@ -0,0 +1,67 @@ +# /etc/inputrc - global inputrc for libreadline +# See readline(3readline) and `info rluserman' for more 
information. + +# Be 8 bit clean. +set input-meta on +set output-meta on + +# To allow the use of 8bit-characters like the german umlauts, uncomment +# the line below. However this makes the meta key not work as a meta key, +# which is annoying to those which don't need to type in 8-bit characters. + +# set convert-meta off + +# try to enable the application keypad when it is called. Some systems +# need this to enable the arrow keys. +# set enable-keypad on + +# see /usr/share/doc/bash/inputrc.arrows for other codes of arrow keys + +# do not bell on tab-completion +# set bell-style none +# set bell-style visible + +# some defaults / modifications for the emacs mode +$if mode=emacs + +# allow the use of the Home/End keys +"\e[1~": beginning-of-line +"\e[4~": end-of-line + +# allow the use of the Delete/Insert keys +"\e[3~": delete-char +"\e[2~": quoted-insert + +# mappings for "page up" and "page down" to step to the beginning/end +# of the history +# "\e[5~": beginning-of-history +# "\e[6~": end-of-history + +# alternate mappings for "page up" and "page down" to search the history +# "\e[5~": history-search-backward +# "\e[6~": history-search-forward + +# mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving +"\e[1;5C": forward-word +"\e[1;5D": backward-word +"\e[5C": forward-word +"\e[5D": backward-word +"\e\e[C": forward-word +"\e\e[D": backward-word + +$if term=rxvt +"\e[7~": beginning-of-line +"\e[8~": end-of-line +"\eOc": forward-word +"\eOd": backward-word +$endif + +# for non RH/Debian xterm, can't hurt for RH/Debian xterm +# "\eOH": beginning-of-line +# "\eOF": end-of-line + +# for freebsd console +# "\e[H": beginning-of-line +# "\e[F": end-of-line + +$endif diff --git a/images/02-acpid/Dockerfile b/images/02-acpid/Dockerfile new file mode 100644 index 00000000..e2f1cc9c --- /dev/null +++ b/images/02-acpid/Dockerfile @@ -0,0 +1,2 @@ +FROM rancher/os-base +CMD ["/usr/sbin/acpid", "-f"] diff --git a/images/02-autoformat/Dockerfile b/images/02-autoformat/Dockerfile new file mode 100644 index 00000000..9424c9ca --- /dev/null +++ b/images/02-autoformat/Dockerfile @@ -0,0 +1,4 @@ +FROM rancher/os-base +COPY auto-format.sh /usr/sbin/ +COPY od-1m0 / +ENTRYPOINT ["/usr/sbin/auto-format.sh"] diff --git a/images/02-autoformat/auto-format.sh b/images/02-autoformat/auto-format.sh new file mode 100755 index 00000000..053b1b2a --- /dev/null +++ b/images/02-autoformat/auto-format.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -ex + +MAGIC=${MAGIC:-"boot2docker, please format-me"} + +AUTOFORMAT=${AUTOFORMAT:-"/dev/sda /dev/vda"} +DEVS=(${AUTOFORMAT}) +FORMATZERO=${FORMATZERO:-false} + +for dev in ${DEVS[@]}; do + if [ -b "${dev}" ]; then + + # Test for our magic string (it means that the disk was made by ./boot2docker init) + HEADER=`dd if=${dev} bs=1 count=${#MAGIC} 2>/dev/null` + + if [ "$HEADER" = "$MAGIC" ]; then + # save the preload userdata.tar file + dd if=${dev} of=/userdata.tar bs=1 count=8192 + elif [ "${FORMATZERO}" != "true" ]; then + # do not try to guess whether to auto-format a disk beginning with 1MB filled with 00 + continue + elif ! 
od -A d -N 1048576 ${dev} | head -n 3 | diff ./od-1m0 - >/dev/null 2>&1; then + # do not auto-format if the disk does not begin with 1MB filled with 00 + continue + fi + + mkfs.ext4 -L RANCHER_STATE ${dev} + + if [ -e "/userdata.tar" ]; then + mkdir -p /mnt/new-root + mount -t ext4 ${dev} /mnt/new-root + pushd /mnt/new-root + mkdir -p ./var/lib/rancher/conf/cloud-config.d + echo $(tar -xvf /userdata.tar) + AUTHORIZED_KEY1=$(cat ./.ssh/authorized_keys) + AUTHORIZED_KEY2=$(cat ./.ssh/authorized_keys2) + tee ./var/lib/rancher/conf/cloud-config.d/machine.yml << EOF +#cloud-config + +rancher: + network: + interfaces: + eth0: + dhcp: true + eth1: + dhcp: true + lo: + address: 127.0.0.1/8 + +ssh_authorized_keys: + - ${AUTHORIZED_KEY1} + - ${AUTHORIZED_KEY2} + +users: + - name: docker + ssh_authorized_keys: + - ${AUTHORIZED_KEY1} + - ${AUTHORIZED_KEY2} +EOF + popd + umount /mnt/new-root + fi + + # do not check another device + break + fi +done diff --git a/images/02-autoformat/od-1m0 b/images/02-autoformat/od-1m0 new file mode 100644 index 00000000..20128b03 --- /dev/null +++ b/images/02-autoformat/od-1m0 @@ -0,0 +1,3 @@ +0000000 000000 000000 000000 000000 000000 000000 000000 000000 +* +1048576 diff --git a/images/02-cloudinit/Dockerfile b/images/02-cloudinit/Dockerfile new file mode 100644 index 00000000..88aa0d6c --- /dev/null +++ b/images/02-cloudinit/Dockerfile @@ -0,0 +1,3 @@ +FROM rancher/os-base +COPY cloud-init.sh / +CMD ["/cloud-init.sh"] diff --git a/images/02-cloudinit/cloud-init.sh b/images/02-cloudinit/cloud-init.sh new file mode 100755 index 00000000..026452d8 --- /dev/null +++ b/images/02-cloudinit/cloud-init.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -x -e + +MOUNT_POINT=/media/config-2 +CONFIG_DEV=$(ros dev "LABEL=config-2") + +mkdir -p ${MOUNT_POINT} + +if [ -e "${CONFIG_DEV}" ]; then + mount -t iso9660,vfat ${CONFIG_DEV} ${MOUNT_POINT} +else + mount -t 9p -o trans=virtio,version=9p2000.L config-2 ${MOUNT_POINT} 2>/dev/null || true +fi + +cloud-init -save -network=${CLOUD_INIT_NETWORK:-true} diff --git a/images/02-console/Dockerfile b/images/02-console/Dockerfile new file mode 100644 index 00000000..c63e7c9f --- /dev/null +++ b/images/02-console/Dockerfile @@ -0,0 +1,16 @@ +FROM rancher/os-base +COPY console.sh docker-init update-ssh-keys rancheros-install /usr/sbin/ +COPY build/lsb-release /etc/ +RUN sed -i 's/rancher:!/rancher:*/g' /etc/shadow && \ + sed -i 's/docker:!/docker:*/g' /etc/shadow && \ + sed -i 's/#ClientAliveInterval 0/ClientAliveInterval 180/g' /etc/ssh/sshd_config && \ + echo '## allow password less for rancher user' >> /etc/sudoers && \ + echo 'rancher ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers && \ + echo '## allow password less for docker user' >> /etc/sudoers && \ + echo 'docker ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers && \ + ln -sf /usr/bin/docker.dist /usr/bin/docker && \ + ln -sf /usr/bin/docker-containerd.dist /usr/bin/docker-containerd && \ + ln -sf /usr/bin/docker-containerd-shim.dist /usr/bin/docker-containerd-shim && \ + ln -sf /usr/bin/docker-runc.dist /usr/bin/docker-runc +COPY prompt.sh /etc/profile.d/ +CMD ["/usr/sbin/console.sh"] diff --git a/images/02-console/console.sh b/images/02-console/console.sh new file mode 100755 index 00000000..5550eaf6 --- /dev/null +++ b/images/02-console/console.sh @@ -0,0 +1,147 @@ +#!/bin/bash +set -e -x + +setup_ssh() +{ + for i in rsa dsa ecdsa ed25519; do + local output=/etc/ssh/ssh_host_${i}_key + if [ ! 
-s $output ]; then + local saved="$(ros config get rancher.ssh.keys.${i})" + local pub="$(ros config get rancher.ssh.keys.${i}-pub)" + + if [[ -n "$saved" && -n "$pub" ]]; then + ( + umask 077 + temp_file=$(mktemp) + echo "$saved" > ${temp_file} + mv ${temp_file} ${output} + temp_file=$(mktemp) + echo "$pub" > ${temp_file} + mv ${temp_file} ${output}.pub + ) + else + ssh-keygen -f $output -N '' -t $i + ros config set -- rancher.ssh.keys.${i} "$(<${output})" + ros config set -- rancher.ssh.keys.${i}-pub "$(<${output}.pub)" + fi + fi + done + + mkdir -p /var/run/sshd +} + +setup_cgroup() +{ + local cgroup=$(grep name=systemd /proc/$$/cgroup | cut -f3 -d:) + if [ -n "$cgroup" ]; then + mkdir -p /sys/fs/cgroup/systemd${cgroup} + fi +} + +setup_cgroup || true + +RANCHER_HOME=/home/rancher +if [ ! -d ${RANCHER_HOME} ]; then + mkdir -p ${RANCHER_HOME} + chown rancher:rancher ${RANCHER_HOME} + chmod 2755 ${RANCHER_HOME} +fi + +DOCKER_HOME=/home/docker +if [ ! -d ${DOCKER_HOME} ]; then + mkdir -p ${DOCKER_HOME} + chown docker:docker ${DOCKER_HOME} + chmod 2755 ${DOCKER_HOME} +fi + +echo 1000000000 > /proc/sys/fs/file-max + +setup_ssh + +cat > /etc/respawn.conf << EOF +/sbin/getty 115200 tty6 +/sbin/getty 115200 tty5 +/sbin/getty 115200 tty4 +/sbin/getty 115200 tty3 +/sbin/getty 115200 tty2 +/sbin/getty 115200 tty1 +/usr/sbin/sshd -D +EOF + +for i in ttyS{0..4} ttyAMA0; do + if grep -q 'console='$i /proc/cmdline; then + echo '/sbin/getty 115200' $i >> /etc/respawn.conf + fi +done + +if ! grep -q '^UseDNS no' /etc/ssh/sshd_config; then + echo "UseDNS no" >> /etc/ssh/sshd_config +fi + +if ! grep -q '^PermitRootLogin no' /etc/ssh/sshd_config; then + echo "PermitRootLogin no" >> /etc/ssh/sshd_config +fi + +if ! grep -q '^ServerKeyBits 2048' /etc/ssh/sshd_config; then + echo "ServerKeyBits 2048" >> /etc/ssh/sshd_config +fi + +if ! grep -q '^AllowGroups docker' /etc/ssh/sshd_config; then + echo "AllowGroups docker" >> /etc/ssh/sshd_config +fi + +VERSION="$(ros os version)" +ID_TYPE="busybox" +if [ -e /etc/os-release ] && grep -q 'ID_LIKE=' /etc/os-release; then + ID_TYPE=$(grep 'ID_LIKE=' /etc/os-release | cut -d'=' -f2) +fi + +cat > /etc/os-release << EOF +NAME="RancherOS" +VERSION=$VERSION +ID=rancheros +ID_LIKE=$ID_TYPE +VERSION_ID=$VERSION +PRETTY_NAME="RancherOS" +HOME_URL= +SUPPORT_URL= +BUG_REPORT_URL= +BUILD_ID= +EOF + +echo 'RancherOS \n \l' > /etc/issue +echo $(/sbin/ifconfig | grep -B1 "inet addr" |awk '{ if ( $1 == "inet" ) { print $2 } else if ( $2 == "Link" ) { printf "%s:" ,$1 } }' |awk -F: '{ print $1 ": " $3}') >> /etc/issue + +cloud-init -execute + +if [ -x /var/lib/rancher/conf/cloud-config-script ]; then + echo "Running /var/lib/rancher/conf/cloud-config-script" + /var/lib/rancher/conf/cloud-config-script || true +fi + +if [ -x /opt/rancher/bin/start.sh ]; then + echo Executing custom script + /opt/rancher/bin/start.sh || true +fi + +touch /run/console-done + +if [ -x /etc/rc.local ]; then + echo Executing rc.local + /etc/rc.local || true +fi + +export TERM=linux +exec respawn -f /etc/respawn.conf diff --git a/images/02-console/docker-init b/images/02-console/docker-init new file mode 100755 index 00000000..d02bc809 --- /dev/null +++ b/images/02-console/docker-init @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +if [ -e /var/lib/rancher/conf/docker ]; then + source /var/lib/rancher/conf/docker +fi + +while [ ! 
-e /run/console-done ]; do + sleep 1 +done + +DOCKER_BIN=$(which docker) || DOCKER_BIN=/usr/bin/docker + +for i in /opt/bin /usr/local/bin; do + if [ -x ${i}/docker ]; then + PATH=${i}:$PATH + DOCKER_BIN=${i}/docker + break + fi +done + +exec /usr/bin/dockerlaunch $DOCKER_BIN "$@" $DOCKER_OPTS >>/var/log/docker.log 2>&1 diff --git a/images/02-console/prebuild.sh b/images/02-console/prebuild.sh new file mode 100755 index 00000000..b4f92e3b --- /dev/null +++ b/images/02-console/prebuild.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +VERSION=${VERSION:?"VERSION not set"} + +cd $(dirname $0) + +rm -rf ./build +mkdir -p ./build + +cat > ./build/lsb-release << EOF +DISTRIB_ID=${DISTRIB_ID} +DISTRIB_RELEASE=${VERSION} +DISTRIB_DESCRIPTION="${DISTRIB_ID} ${VERSION}" +EOF diff --git a/images/02-console/prompt.sh b/images/02-console/prompt.sh new file mode 100644 index 00000000..4438c010 --- /dev/null +++ b/images/02-console/prompt.sh @@ -0,0 +1 @@ +export PS1='[\u@\h \W]\$ ' diff --git a/images/02-console/rancheros-install b/images/02-console/rancheros-install new file mode 100755 index 00000000..9c31faf3 --- /dev/null +++ b/images/02-console/rancheros-install @@ -0,0 +1,9 @@ +#!/bin/bash +set -e + +cat <> $HOME_DIR/.ssh/authorized_keys +fi + +chown -R $USERNAME $HOME_DIR/.ssh diff --git a/images/02-docker/Dockerfile b/images/02-docker/Dockerfile new file mode 100644 index 00000000..d36402a9 --- /dev/null +++ b/images/02-docker/Dockerfile @@ -0,0 +1,2 @@ +FROM rancher/os-base +CMD ["/usr/bin/user-docker"] diff --git a/images/02-ntp/Dockerfile b/images/02-ntp/Dockerfile new file mode 100644 index 00000000..890419bc --- /dev/null +++ b/images/02-ntp/Dockerfile @@ -0,0 +1,3 @@ +FROM rancher/os-base +COPY ntp.sh / +CMD ["/ntp.sh"] diff --git a/images/02-ntp/ntp.sh b/images/02-ntp/ntp.sh new file mode 100755 index 00000000..6cb21b2e --- /dev/null +++ b/images/02-ntp/ntp.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +exec ntpd --nofork -g diff --git a/images/02-preload/Dockerfile b/images/02-preload/Dockerfile new file mode 100644 index 00000000..4185c99a --- /dev/null +++ b/images/02-preload/Dockerfile @@ -0,0 +1,7 @@ +FROM rancher/os-base +RUN ln -sf /usr/bin/docker.dist /usr/bin/docker && \ + ln -sf /usr/bin/docker-containerd.dist /usr/bin/docker-containerd && \ + ln -sf /usr/bin/docker-containerd-shim.dist /usr/bin/docker-containerd-shim && \ + ln -sf /usr/bin/docker-runc.dist /usr/bin/docker-runc +COPY preload.sh / +CMD ["/preload.sh"] diff --git a/images/02-preload/preload.sh b/images/02-preload/preload.sh new file mode 100755 index 00000000..0320127c --- /dev/null +++ b/images/02-preload/preload.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -e + +BASE=${1:-${PRELOAD_DIR}} +BASE=${BASE:-/mnt/preload} + +should_load() { + file=${1} + if [[ ${file} =~ \.done$ ]]; then echo false + elif [ -f ${file} ]; then + if [[ ${file} -nt ${file}.done ]]; then echo true + else echo false + fi + else echo false + fi +} + +if [ -d ${BASE} ]; then + echo Preloading docker images from ${BASE}... + + for file in $(ls ${BASE}); do + path=${BASE}/${file} + loading=$(should_load ${path}) + if [ ${loading} == "true" ]; then + CAT="cat ${path}" + if [[ ${file} =~ \.t?gz$ ]]; then CAT="${CAT} | gunzip"; fi + if [[ ${file} =~ \.t?xz$ ]]; then CAT="${CAT} | unxz"; fi + CAT="${CAT} | docker load" + echo loading from ${path} + eval ${CAT} || : + touch ${path}.done || : + fi + done + + echo Done. +else + echo Can not preload images from ${BASE}: not a dir or does not exist. 
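Aside on preload.sh above: should_load skips ".done" marker files themselves and re-imports an archive only when it is newer than its marker, which is exactly what bash's -nt test checks (true when the left file is newer, or when the marker does not exist yet). A Go sketch of the same freshness rule; shouldLoad and the sample path are illustrative:

package main

import (
	"fmt"
	"os"
	"strings"
)

// shouldLoad mirrors should_load: never load ".done" markers, load a
// regular file when its ".done" marker is missing or older than the file.
func shouldLoad(path string) bool {
	if strings.HasSuffix(path, ".done") {
		return false
	}
	fi, err := os.Stat(path)
	if err != nil || fi.IsDir() {
		return false
	}
	done, err := os.Stat(path + ".done")
	if err != nil {
		return true // no marker yet: load it
	}
	return fi.ModTime().After(done.ModTime())
}

func main() {
	fmt.Println(shouldLoad("/mnt/preload/images.tar.gz"))
}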
+fi + diff --git a/images/02-state/Dockerfile b/images/02-state/Dockerfile new file mode 100644 index 00000000..4f530f7c --- /dev/null +++ b/images/02-state/Dockerfile @@ -0,0 +1,2 @@ +FROM rancher/os-base +CMD ["echo"] diff --git a/images/02-statescript/Dockerfile b/images/02-statescript/Dockerfile new file mode 100644 index 00000000..46dacbac --- /dev/null +++ b/images/02-statescript/Dockerfile @@ -0,0 +1,3 @@ +FROM rancher/os-base +COPY state.sh /usr/sbin/ +CMD ["/usr/sbin/state.sh"] diff --git a/images/02-statescript/state.sh b/images/02-statescript/state.sh new file mode 100755 index 00000000..bc11050c --- /dev/null +++ b/images/02-statescript/state.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -x + +if [ "$(ros config get rancher.state.mdadm_scan)" = "true" ]; then + mdadm --assemble --scan +fi + +ros config get rancher.state.script > config.sh +if [ -s config.sh ]; then + chmod +x config.sh + exec ./config.sh +fi diff --git a/images/02-syslog/Dockerfile b/images/02-syslog/Dockerfile new file mode 100644 index 00000000..b97ad7bf --- /dev/null +++ b/images/02-syslog/Dockerfile @@ -0,0 +1,4 @@ +FROM rancher/os-base +COPY syslog.sh / +RUN sed -i 1,10d /etc/rsyslog.conf +CMD ["/syslog.sh"] diff --git a/images/02-syslog/syslog.sh b/images/02-syslog/syslog.sh new file mode 100755 index 00000000..285ffefa --- /dev/null +++ b/images/02-syslog/syslog.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +set -x -e + +exec rsyslogd -n diff --git a/images/02-udev/Dockerfile b/images/02-udev/Dockerfile new file mode 100644 index 00000000..6f8de398 --- /dev/null +++ b/images/02-udev/Dockerfile @@ -0,0 +1,3 @@ +FROM rancher/os-base +COPY udev.sh / +CMD ["/udev.sh"] diff --git a/images/02-udev/udev.sh b/images/02-udev/udev.sh new file mode 100755 index 00000000..2bfa8b49 --- /dev/null +++ b/images/02-udev/udev.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [ "$DAEMON" = true ]; then + exec udevd +fi + +udevd --daemon +udevadm trigger --action=add +udevadm settle + +if [ "$BOOTSTRAP" = true ]; then + # This was needed to get USB devices to fully register + # There is probably a better way to do this + killall udevd + udevd --daemon + udevadm trigger --action=add + udevadm settle +fi diff --git a/init/bootstrap.go b/init/bootstrap.go index 79fb0d9a..bb0dc92d 100644 --- a/init/bootstrap.go +++ b/init/bootstrap.go @@ -7,7 +7,6 @@ import ( "strings" log "github.com/Sirupsen/logrus" - "github.com/docker/libcompose/project" "github.com/rancher/docker-from-scratch" "github.com/rancher/os/compose" "github.com/rancher/os/config" @@ -21,7 +20,7 @@ func autoformat(cfg *config.CloudConfig) (*config.CloudConfig, error) { AUTOFORMAT := "AUTOFORMAT=" + strings.Join(cfg.Rancher.State.Autoformat, " ") FORMATZERO := "FORMATZERO=" + fmt.Sprint(cfg.Rancher.State.FormatZero) t := *cfg - t.Rancher.Autoformat["autoformat"].Environment = project.NewMaporEqualSlice([]string{AUTOFORMAT, FORMATZERO}) + t.Rancher.Autoformat["autoformat"].Environment = []string{AUTOFORMAT, FORMATZERO} log.Info("Running Autoformat services") _, err := compose.RunServiceSet("autoformat", &t, t.Rancher.Autoformat) return &t, err @@ -34,13 +33,12 @@ func runBootstrapContainers(cfg *config.CloudConfig) (*config.CloudConfig, error) { } func startDocker(cfg *config.CloudConfig) (chan interface{}, error) { - launchConfig, args := getLaunchConfig(cfg, &cfg.Rancher.BootstrapDocker) launchConfig.Fork = true launchConfig.LogFile = "" launchConfig.NoLog = true - cmd, err := dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...) 
+ cmd, err := dockerlaunch.LaunchDocker(launchConfig, config.SYSTEM_DOCKER_BIN, args...) if err != nil { return nil, err } diff --git a/init/init.go b/init/init.go index 851e1961..2226c299 100644 --- a/init/init.go +++ b/init/init.go @@ -118,10 +118,7 @@ func mountState(cfg *config.CloudConfig) error { func mountOem(cfg *config.CloudConfig) (*config.CloudConfig, error) { if cfg == nil { - var err error - if cfg, err = config.LoadConfig(); err != nil { - return cfg, err - } + cfg = config.LoadConfig() } if err := mountConfigured("oem", cfg.Rancher.State.OemDev, cfg.Rancher.State.OemFsType, config.OEM); err != nil { log.Debugf("Not mounting OEM: %v", err) @@ -165,8 +162,8 @@ func getLaunchConfig(cfg *config.CloudConfig, dockerCfg *config.DockerConfig) (* args := dockerlaunch.ParseConfig(&launchConfig, append(dockerCfg.Args, dockerCfg.ExtraArgs...)...) - launchConfig.DnsConfig.Nameservers = cfg.Rancher.DefaultNetwork.Dns.Nameservers - launchConfig.DnsConfig.Search = cfg.Rancher.DefaultNetwork.Dns.Search + launchConfig.DnsConfig.Nameservers = cfg.Rancher.Defaults.Network.Dns.Nameservers + launchConfig.DnsConfig.Search = cfg.Rancher.Defaults.Network.Dns.Search launchConfig.Environment = dockerCfg.Environment launchConfig.EmulateSystemd = true @@ -199,13 +196,10 @@ func RunInit() error { }, mountOem, func(_ *config.CloudConfig) (*config.CloudConfig, error) { - cfg, err := config.LoadConfig() - if err != nil { - return cfg, err - } + cfg := config.LoadConfig() if cfg.Rancher.Debug { - cfgString, err := config.Dump(false, true) + cfgString, err := config.Export(false, true) if err != nil { log.WithFields(log.Fields{"err": err}).Error("Error serializing config") } else { @@ -218,7 +212,7 @@ func RunInit() error { loadModules, tryMountAndBootstrap, func(_ *config.CloudConfig) (*config.CloudConfig, error) { - return config.LoadConfig() + return config.LoadConfig(), nil }, loadModules, func(c *config.CloudConfig) (*config.CloudConfig, error) { @@ -241,7 +235,7 @@ func RunInit() error { launchConfig.Fork = !cfg.Rancher.SystemDocker.Exec log.Info("Launching System Docker") - _, err = dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...) + _, err = dockerlaunch.LaunchDocker(launchConfig, config.SYSTEM_DOCKER_BIN, args...) 
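Aside: RunInit above (and SysInit just below) drive boot as a chain of config-transforming steps; config.ChainCfgFuncs runs each step in order, passing the possibly-replaced config along and stopping at the first error. A minimal sketch of that pattern with hypothetical types (cloudConfig, cfgFunc, chain):

package main

import "fmt"

type cloudConfig struct{ hostname string }

// cfgFunc matches the shape of the chained steps: take the config,
// optionally replace it, or abort the chain with an error.
type cfgFunc func(*cloudConfig) (*cloudConfig, error)

func chain(cfg *cloudConfig, fns ...cfgFunc) (*cloudConfig, error) {
	for _, fn := range fns {
		var err error
		if cfg, err = fn(cfg); err != nil {
			return cfg, err
		}
	}
	return cfg, nil
}

func main() {
	_, err := chain(&cloudConfig{},
		func(c *cloudConfig) (*cloudConfig, error) { c.hostname = "rancher"; return c, nil },
		func(c *cloudConfig) (*cloudConfig, error) { fmt.Println("hostname:", c.hostname); return c, nil },
	)
	if err != nil {
		fmt.Println("boot step failed:", err)
	}
}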
if err != nil { return err } diff --git a/init/sysinit.go b/init/sysinit.go index 7935b4fe..0dfa96cb 100644 --- a/init/sysinit.go +++ b/init/sysinit.go @@ -5,8 +5,10 @@ import ( "path" "syscall" + "golang.org/x/net/context" + log "github.com/Sirupsen/logrus" - dockerClient "github.com/fsouza/go-dockerclient" + "github.com/docker/libcompose/project/options" "github.com/rancher/os/compose" "github.com/rancher/os/config" "github.com/rancher/os/docker" @@ -76,10 +78,7 @@ func loadImages(cfg *config.CloudConfig) (*config.CloudConfig, error) { defer input.Close() log.Infof("Loading images from %s", inputFileName) - err = client.LoadImage(dockerClient.LoadImageOptions{ - InputStream: input, - }) - if err != nil { + if _, err = client.ImageLoad(context.Background(), input, true); err != nil { return cfg, err } @@ -90,19 +89,21 @@ func loadImages(cfg *config.CloudConfig) (*config.CloudConfig, error) { } func SysInit() error { - cfg, err := config.LoadConfig() - if err != nil { - return err - } + cfg := config.LoadConfig() - _, err = config.ChainCfgFuncs(cfg, + _, err := config.ChainCfgFuncs(cfg, loadImages, func(cfg *config.CloudConfig) (*config.CloudConfig, error) { - p, err := compose.GetProject(cfg, false) + p, err := compose.GetProject(cfg, false, true) if err != nil { return cfg, err } - return cfg, p.Up() + return cfg, p.Up(context.Background(), options.Up{ + Create: options.Create{ + NoRecreate: true, + }, + Log: cfg.Rancher.Log, + }) }, func(cfg *config.CloudConfig) (*config.CloudConfig, error) { syscall.Sync() diff --git a/main.go b/main.go index c4a855f6..dee27724 100644 --- a/main.go +++ b/main.go @@ -1,10 +1,7 @@ package main import ( - "os" - "strings" - - log "github.com/Sirupsen/logrus" + "github.com/docker/docker/docker" "github.com/docker/docker/pkg/reexec" "github.com/rancher/docker-from-scratch" "github.com/rancher/os/cmd/cloudinit" @@ -12,54 +9,38 @@ import ( "github.com/rancher/os/cmd/network" "github.com/rancher/os/cmd/power" "github.com/rancher/os/cmd/respawn" + "github.com/rancher/os/cmd/switchconsole" "github.com/rancher/os/cmd/sysinit" "github.com/rancher/os/cmd/systemdocker" "github.com/rancher/os/cmd/userdocker" "github.com/rancher/os/cmd/wait" - "github.com/rancher/os/cmd/waitfornetwork" - "github.com/rancher/os/config" osInit "github.com/rancher/os/init" ) -func registerCmd(cmd string, mainFunc func()) { - log.Debugf("Registering main %s", cmd) - reexec.Register(cmd, mainFunc) - - parts := strings.Split(cmd, "/") - if len(parts) == 0 { - return - } - - last := parts[len(parts)-1] - - log.Debugf("Registering main %s", last) - reexec.Register(last, mainFunc) - - log.Debugf("Registering main %s", "./"+last) - reexec.Register("./"+last, mainFunc) +var entrypoints = map[string]func(){ + "cloud-init": cloudinit.Main, + "docker": docker.Main, + "dockerlaunch": dockerlaunch.Main, + "halt": power.Halt, + "init": osInit.MainInit, + "netconf": network.Main, + "poweroff": power.PowerOff, + "reboot": power.Reboot, + "respawn": respawn.Main, + "ros-sysinit": sysinit.Main, + "shutdown": power.Main, + "switch-console": switchconsole.Main, + "system-docker": systemdocker.Main, + "user-docker": userdocker.Main, + "wait-for-docker": wait.Main, } func main() { - registerCmd("/init", osInit.MainInit) - registerCmd(config.SYSINIT_BIN, sysinit.Main) - registerCmd("/usr/bin/dockerlaunch", dockerlaunch.Main) - registerCmd("/usr/bin/user-docker", userdocker.Main) - registerCmd("/usr/bin/system-docker", systemdocker.Main) - registerCmd("/sbin/poweroff", power.PowerOff) - 
registerCmd("/sbin/reboot", power.Reboot) - registerCmd("/sbin/halt", power.Halt) - registerCmd("/sbin/shutdown", power.Main) - registerCmd("/usr/bin/respawn", respawn.Main) - registerCmd("/usr/bin/ros", control.Main) - registerCmd("/usr/bin/cloud-init", cloudinit.Main) - registerCmd("/usr/sbin/netconf", network.Main) - registerCmd("/usr/sbin/wait-for-network", waitfornetwork.Main) - registerCmd("/usr/sbin/wait-for-docker", wait.Main) + for name, f := range entrypoints { + reexec.Register(name, f) + } if !reexec.Init() { - reexec.Register(os.Args[0], control.Main) - if !reexec.Init() { - log.Fatalf("Failed to find an entry point for %s", os.Args[0]) - } + control.Main() } } diff --git a/os-config.tpl.yml b/os-config.tpl.yml index c2e717bf..20c6eb96 100644 --- a/os-config.tpl.yml +++ b/os-config.tpl.yml @@ -1,8 +1,12 @@ -default_hostname: {{.HOSTNAME_DEFAULT}} rancher: + defaults: + hostname: {{.HOSTNAME_DEFAULT}} + network: + dns: + nameservers: [8.8.8.8, 8.8.4.4] bootstrap: state-script: - image: {{.OS_IMAGES_ROOT}}/os-statescript:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-statescript:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.detach: "false" io.rancher.os.scope: system @@ -18,7 +22,7 @@ rancher: - /usr/bin/ros:/usr/bin/ros:ro - /usr/share/ros:/usr/share/ros:ro udev-bootstrap: - image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}} environment: - BOOTSTRAP=true labels: @@ -34,7 +38,7 @@ rancher: - /lib/firmware:/lib/firmware autoformat: autoformat: - image: {{.OS_IMAGES_ROOT}}/os-autoformat:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-autoformat:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.detach: "false" io.rancher.os.scope: system @@ -42,7 +46,7 @@ rancher: net: none privileged: true udev-autoformat: - image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.detach: "false" io.rancher.os.scope: system @@ -58,15 +62,13 @@ rancher: bootstrap_docker: args: [daemon, -s, overlay, -b, none, --restart=false, -g, /var/lib/system-docker, -G, root, -H, 'unix:///var/run/system-docker.sock', --userland-proxy=false] + console: default cloud_init: datasources: - configdrive:/media/config-2 - default_network: - dns: - nameservers: [8.8.8.8, 8.8.4.4] repositories: core: - url: {{.OS_SERVICES_REPO}}/{{.VERSION}}{{.SUFFIX}} + url: {{.OS_SERVICES_REPO}}/{{.REPO_VERSION}}{{.SUFFIX}} state: fstype: auto dev: LABEL=RANCHER_STATE @@ -75,7 +77,7 @@ rancher: services: {{if eq "amd64" .ARCH -}} acpid: - image: {{.OS_IMAGES_ROOT}}/os-acpid:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-acpid:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.scope: system net: host @@ -86,7 +88,7 @@ rancher: - system-volumes {{end -}} all-volumes: - image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.createonly: "true" io.rancher.os.scope: system @@ -100,7 +102,7 @@ rancher: - user-volumes - system-volumes cloud-init: - image: {{.OS_IMAGES_ROOT}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.detach: "false" io.rancher.os.reloadconfig: "true" @@ -113,7 +115,7 @@ rancher: - command-volumes - system-volumes cloud-init-pre: - image: {{.OS_IMAGES_ROOT}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}} environment: - CLOUD_INIT_NETWORK=false labels: @@ -128,7 +130,7 @@ rancher: - 
command-volumes - system-volumes command-volumes: - image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.createonly: "true" io.rancher.os.scope: system @@ -137,6 +139,9 @@ rancher: privileged: true read_only: true volumes: + - /usr/bin/docker-containerd:/usr/bin/docker-containerd.dist:ro + - /usr/bin/docker-containerd-shim:/usr/bin/docker-containerd-shim.dist:ro + - /usr/bin/docker-runc:/usr/bin/docker-runc.dist:ro - /usr/bin/docker:/usr/bin/docker.dist:ro - /usr/bin/ros:/usr/bin/dockerlaunch:ro - /usr/bin/ros:/usr/bin/user-docker:ro @@ -149,14 +154,15 @@ rancher: - /usr/bin/ros:/usr/bin/ros:ro - /usr/bin/ros:/usr/bin/cloud-init:ro - /usr/bin/ros:/usr/sbin/netconf:ro - - /usr/bin/ros:/usr/sbin/wait-for-network:ro - /usr/bin/ros:/usr/sbin/wait-for-docker:ro + - /usr/bin/ros:/usr/bin/switch-console:ro console: - image: {{.OS_IMAGES_ROOT}}/os-console:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-console:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.scope: system - io.rancher.os.after: wait-for-network + io.rancher.os.after: network io.docker.compose.rebuild: always + io.rancher.os.console: default net: host uts: host pid: host @@ -168,7 +174,7 @@ rancher: volumes: - /usr/bin/iptables:/sbin/iptables:ro container-data-volumes: - image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.createonly: "true" io.rancher.os.scope: system @@ -180,7 +186,8 @@ rancher: - /var/lib/docker:/var/lib/docker - /var/lib/rkt:/var/lib/rkt network-pre: - image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-base:{{.VERSION}}{{.SUFFIX}} + command: netconf labels: io.rancher.os.scope: system io.rancher.os.after: cloud-init-pre @@ -192,12 +199,11 @@ rancher: - command-volumes - system-volumes network: - image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-base:{{.VERSION}}{{.SUFFIX}} + command: netconf --stop-network-pre labels: io.rancher.os.scope: system io.rancher.os.after: cloud-init - environment: - - DAEMON=true net: host uts: host pid: host @@ -205,41 +211,17 @@ rancher: volumes_from: - command-volumes - system-volumes - wait-for-network-pre: - image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}} - command: wait-for-network - labels: - io.rancher.os.detach: "false" - io.rancher.os.scope: system - io.rancher.os.after: network-pre - pid: host - privileged: true - volumes_from: - - command-volumes - - system-volumes - wait-for-network: - image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}} - command: wait-for-network - labels: - io.rancher.os.detach: "false" - io.rancher.os.scope: system - io.rancher.os.after: network - pid: host - privileged: true - volumes_from: - - command-volumes - - system-volumes ntp: - image: {{.OS_IMAGES_ROOT}}/os-ntp:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-ntp:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.scope: system - io.rancher.os.after: wait-for-network-pre + io.rancher.os.after: network-pre net: host uts: host privileged: true restart: always preload-system-images: - image: {{.OS_IMAGES_ROOT}}/os-preload:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-preload:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.detach: "false" io.rancher.os.scope: system @@ -251,7 +233,7 @@ rancher: - command-volumes - system-volumes preload-user-images: - image: {{.OS_IMAGES_ROOT}}/os-preload:{{.VERSION}}{{.SUFFIX}} + image: 
{{.OS_REPO}}/os-preload:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.detach: "false" io.rancher.os.scope: system @@ -264,7 +246,7 @@ rancher: - command-volumes - system-volumes syslog: - image: {{.OS_IMAGES_ROOT}}/os-syslog:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-syslog:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.scope: system log_driver: json-file @@ -275,7 +257,7 @@ rancher: volumes_from: - system-volumes system-volumes: - image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.createonly: "true" io.rancher.os.scope: system @@ -295,12 +277,13 @@ rancher: - /lib/modules:/lib/modules - /run:/run - /usr/share/ros:/usr/share/ros + - /var/lib/rancher/cache:/var/lib/rancher/cache - /var/lib/rancher/conf:/var/lib/rancher/conf - /var/lib/rancher:/var/lib/rancher - /var/log:/var/log - /var/run:/var/run udev-cold: - image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.scope: system io.rancher.os.before: udev @@ -310,7 +293,7 @@ rancher: volumes_from: - system-volumes udev: - image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}} environment: - DAEMON=true labels: @@ -323,7 +306,7 @@ rancher: volumes_from: - system-volumes user-volumes: - image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}} labels: io.rancher.os.createonly: "true" io.rancher.os.scope: system @@ -335,7 +318,7 @@ rancher: - /home:/home - /opt:/opt docker: - image: {{.OS_IMAGES_ROOT}}/os-docker:{{.VERSION}}{{.SUFFIX}} + image: {{.OS_REPO}}/os-docker:{{.VERSION}}{{.SUFFIX}} environment: - HTTP_PROXY - HTTPS_PROXY @@ -354,12 +337,13 @@ rancher: volumes: - /sys/fs/cgroup:/host/sys/fs/cgroup system_docker: + exec: true args: [daemon, --log-opt, max-size=25m, --log-opt, max-file=2, -s, overlay, -b, docker-sys, --fixed-cidr, 172.18.42.1/16, --restart=false, -g, /var/lib/system-docker, -G, root, -H, 'unix:///var/run/system-docker.sock', --userland-proxy=false] upgrade: url: {{.OS_RELEASES_YML}} - image: {{.OS_IMAGES_ROOT}}/os + image: {{.OS_REPO}}/os docker: tls_args: [--tlsverify, --tlscacert=/etc/docker/tls/ca.pem, --tlscert=/etc/docker/tls/server-cert.pem, --tlskey=/etc/docker/tls/server-key.pem, '-H=0.0.0.0:2376'] diff --git a/scripts/build b/scripts/build new file mode 100755 index 00000000..ec88e1e5 --- /dev/null +++ b/scripts/build @@ -0,0 +1,5 @@ +#!/bin/bash +set -e + +$(dirname $0)/build-target +$(dirname $0)/build-host diff --git a/scripts/build-common b/scripts/build-common deleted file mode 100644 index c755576d..00000000 --- a/scripts/build-common +++ /dev/null @@ -1,2 +0,0 @@ -BUILD=$(pwd)/build -DIST=$(pwd)/dist diff --git a/scripts/build-host b/scripts/build-host new file mode 100755 index 00000000..03d60765 --- /dev/null +++ b/scripts/build-host @@ -0,0 +1,12 @@ +#!/bin/bash + +cd $(dirname $0)/.. + +export OUTPUT=bin/host_ros + +if [[ -e bin/ros && "$HOST_ARCH" = "$ARCH" ]]; then + echo Creating $OUTPUT + cp bin/ros $OUTPUT +else + GOARCH=${HOST_ARCH} TOOLCHAIN= ./scripts/build-target +fi diff --git a/scripts/build-images b/scripts/build-images new file mode 100755 index 00000000..26a87c8a --- /dev/null +++ b/scripts/build-images @@ -0,0 +1,25 @@ +#!/bin/bash +set -e + +export ARCH=${ARCH:-"amd64"} +BASE=images + +source $(dirname $0)/version +cd $(dirname $0)/.. 
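+# Build each images/[0-9]*-<name> directory as ${OS_REPO}/os-<name>:${VERSION}${SUFFIX}, running its prebuild.sh hook first if present; a build that exits 42 is skipped with a warning, any other failure aborts.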
+ +for i in $BASE/[0-9]*; do + name="os-$(echo ${i} | cut -f2 -d-)" + tag="${OS_REPO}/${name}:${VERSION}${SUFFIX}" + echo Building ${tag} + if [ -x ${i}/prebuild.sh ]; then + ${i}/prebuild.sh + fi + + if dapper -d --build -f ${i}/Dockerfile -- -t rancher/${name} ${i}; then + docker tag rancher/${name} ${tag} + elif [ "$?" != "42" ]; then + exit 1 + else + echo "WARN: Skipping ${tag}" + fi +done diff --git a/scripts/build-target b/scripts/build-target new file mode 100755 index 00000000..fc7536b1 --- /dev/null +++ b/scripts/build-target @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +ros="$1" + +source $(dirname $0)/version + +cd $(dirname $0)/.. + +if [ "${!TOOLCHAIN}" != "" ]; then + export CC=/usr/bin/${!TOOLCHAIN}-gcc + export CGO_ENABLED=1 +fi + +OUTPUT=${OUTPUT:-bin/ros} +echo Building $OUTPUT + +CONST="-X github.com/docker/docker/dockerversion.GitCommit=${COMMIT} -X github.com/docker/docker/dockerversion.Version=${DOCKER_PATCH_VERSION} -X github.com/docker/docker/dockerversion.BuildTime=$(date -u +'%Y-%m-%dT%H:%M:%SZ') -X github.com/docker/docker/dockerversion.IAmStatic=true -X github.com/rancher/os/config.VERSION=${VERSION}" +go build -tags "selinux cgo daemon netgo" -installsuffix netgo -ldflags "$CONST -linkmode external -extldflags -static" -o ${OUTPUT} +strip --strip-all ${OUTPUT} diff --git a/scripts/build-vbox-vm b/scripts/build-vbox-vm deleted file mode 100755 index 0637c7f6..00000000 --- a/scripts/build-vbox-vm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -set -x -e - -cd $(dirname $0)/.. -: RANCHER_ISO=${RANCHER_ISO:="./dist/artifacts/rancheros.iso"} - -if [[ -z $RANCHER_ISO ]]; then - echo "Need an ISO..." 1>&2 - exit 1 -fi - -if [ ! -e ${RANCHER_ISO} ]; then - echo "Could not find ISO ${RANCHER_ISO}..." 1>&2 - echo "have you run build.sh yet?" 1>&2 - exit 1 -fi - -GITSHA=$(git rev-parse --short HEAD) -VM="RancherOS-${GITSHA}" - -sudo chown -R `whoami` ./dist - -VBoxManage createhd --format vmdk --filename ./dist/artifacts/$VM.vmdk --size 40000 - -VBoxManage createvm --name $VM --ostype "Linux_64" --register - -VBoxManage storagectl $VM --name "SATA" --add sata --portcount 2 -VBoxManage storageattach $VM --storagectl "SATA" --port 0 --type hdd --medium ./dist/artifacts/$VM.vmdk -VBoxManage storageattach $VM --storagectl "SATA" --port 1 --type dvddrive --medium ${RANCHER_ISO} - -VBoxManage modifyvm $VM --memory 1024 --acpi on --boot1 disk --boot2 dvd -VBoxManage modifyvm $VM --rtcuseutc on -VBoxManage modifyvm $VM --usb off -VBoxManage modifyvm $VM --audio none -VBoxManage modifyvm $VM --nic1 nat -VBoxManage modifyvm $VM --nictype1 virtio - -#VBoxManage startvm $VM diff --git a/scripts/ci b/scripts/ci index e78fd443..4f83609d 100755 --- a/scripts/ci +++ b/scripts/ci @@ -1,7 +1,11 @@ -#!/bin/sh -set -ex +#!/bin/bash +set -e -cd $(dirname $0)/.. -. ./scripts/dapper-common +cd $(dirname $0) -dapper -d -O make HOST_ARCH=${HOST_ARCH} ARCH=${ARCH} DEV_BUILD=1 test +./build +./test +#./validate +./prepare +./package +./integration-test diff --git a/scripts/clean b/scripts/clean new file mode 100755 index 00000000..654c724e --- /dev/null +++ b/scripts/clean @@ -0,0 +1,4 @@ +#!/bin/bash + +cd $(dirname $0)/.. 
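+# help: Remove all build output: build, dist, bin, images/*/build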
+rm -rf build dist bin images/*/build diff --git a/scripts/dapper-common b/scripts/dapper-common deleted file mode 100755 index 3f5b7261..00000000 --- a/scripts/dapper-common +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -set -ex - -HOST_ARCH=${HOST_ARCH:-$(docker version | grep 'OS/Arch:' | tail -n+2 | awk '{print $2}' | cut -f2 -d'/')} -HOST_ARCH=${HOST_ARCH:?"Failed to guess HOST_ARCH"} -ARCH=${ARCH:-"$HOST_ARCH"} -export HOST_ARCH ARCH - -cd $(dirname $0)/.. - -[ -f "./.docker-env.${HOST_ARCH}" ] && . ./.docker-env.${HOST_ARCH} || echo "WARNING: missing .docker-env.${HOST_ARCH} (to use an ${HOST_ARCH} docker host)" - -. ./build.conf.${HOST_ARCH} -export HOST_DOCKER_BINARY_URL=${DOCKER_BINARY_URL} -docker inspect $DAPPER_BASE >/dev/null 2>&1 || docker pull $DAPPER_BASE -docker tag $DAPPER_BASE rancher/os-dapper-base - -set -a -. ./build.conf.${ARCH} -set +a diff --git a/scripts/default b/scripts/default new file mode 100755 index 00000000..d17e8165 --- /dev/null +++ b/scripts/default @@ -0,0 +1,7 @@ +#!/bin/bash + +cd $(dirname $0) + +./build +./prepare +./package diff --git a/scripts/dev b/scripts/dev new file mode 100755 index 00000000..740284b5 --- /dev/null +++ b/scripts/dev @@ -0,0 +1,8 @@ +#!/bin/bash +# help: For development, creates iso, kernel, initrd gzip compressed + +cd $(dirname $0) + +./build +./prepare +INSTALLER=0 COMPRESS="gzip -1" ROOTFS=0 ./package diff --git a/scripts/entry b/scripts/entry new file mode 100755 index 00000000..4e8a0f25 --- /dev/null +++ b/scripts/entry @@ -0,0 +1,14 @@ +#!/bin/bash + +mkdir -p bin dist build/initrd +if [ -e ./scripts/$1 ]; then + ./scripts/"$@" +else + "$@" +fi + +EXIT=$? + +chown -R $DAPPER_UID:$DAPPER_GID . + +exit $EXIT diff --git a/scripts/gen-os-config.sh b/scripts/gen-os-config.sh deleted file mode 100755 index fa09c898..00000000 --- a/scripts/gen-os-config.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -ex - -cd $(dirname $0)/.. - -set -a -. build.conf -. build.conf.${ARCH} - -SUFFIX="" -[ "${ARCH}" == "amd64" ] || SUFFIX="_${ARCH}" -set +a - -build/host_ros c generate < os-config.tpl.yml > $1 diff --git a/scripts/hash-initrd b/scripts/hash-initrd new file mode 100755 index 00000000..15576562 --- /dev/null +++ b/scripts/hash-initrd @@ -0,0 +1,5 @@ +#!/bin/bash + +cd $(dirname $0)/../build/initrd + +md5sum $(find -type f | sort -u ) | md5sum - | awk '{print $1}' > .id diff --git a/scripts/help b/scripts/help new file mode 100755 index 00000000..7f1f112e --- /dev/null +++ b/scripts/help @@ -0,0 +1,16 @@ +#!/bin/bash + +cd $(dirname $0) + +echo Targets: + +for i in *; do + if [ ! -x $i ] || [ ! 
-f $i ]; then + continue + fi + + MSG=$(grep '^# help:' $i) + if [ -n "$MSG" ]; then + echo " " ${i}: $(echo $MSG | sed 's/# help://') + fi +done diff --git a/scripts/installer/Dockerfile.amd64 b/scripts/installer/Dockerfile.amd64 new file mode 100644 index 00000000..c41274c4 --- /dev/null +++ b/scripts/installer/Dockerfile.amd64 @@ -0,0 +1,10 @@ +FROM debian:jessie +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y grub2 parted kexec-tools + +COPY conf lay-down-os seed-data set-disk-partitions /scripts/installer/ +COPY ./build/build.conf /scripts/ +COPY ./build/vmlinuz /dist/vmlinuz +COPY ./build/initrd /dist/initrd + +ENTRYPOINT ["/scripts/lay-down-os"] diff --git a/Dockerfile.arm64 b/scripts/installer/Dockerfile.arm64 similarity index 70% rename from Dockerfile.arm64 rename to scripts/installer/Dockerfile.arm64 index 5cedb915..c6d36a35 100644 --- a/Dockerfile.arm64 +++ b/scripts/installer/Dockerfile.arm64 @@ -9,10 +9,9 @@ RUN mkdir -p /usr/local/src && \ git clone https://git.linaro.org/people/takahiro.akashi/kexec-tools.git && \ cd kexec-tools && git checkout kdump/for-14 && ./bootstrap && ./configure && make && make install -COPY ./scripts/installer /scripts -COPY ./build.conf /scripts/ - -COPY ./dist/artifacts/vmlinuz /dist/ -COPY ./dist/artifacts/initrd /dist/ +COPY conf lay-down-os seed-data set-disk-partitions /scripts/installer/ +COPY ./build/build.conf /scripts/ +COPY ./build/vmlinuz /dist/vmlinuz +COPY ./build/initrd /dist/initrd ENTRYPOINT ["/scripts/lay-down-os"] diff --git a/scripts/integration-test b/scripts/integration-test new file mode 100755 index 00000000..741803e7 --- /dev/null +++ b/scripts/integration-test @@ -0,0 +1,11 @@ +#!/bin/bash +# help: Run Python based integration tests +set -e + +cd $(dirname $0)/../tests/integration + +if [ ! -e ../../dist/artifacts/initrd ]; then + ../../scripts/dev +fi + +tox "$@" diff --git a/scripts/layout b/scripts/layout new file mode 100755 index 00000000..525c4b52 --- /dev/null +++ b/scripts/layout @@ -0,0 +1,62 @@ +#!/bin/bash +set -e + +source $(dirname $0)/version +cd $(dirname $0)/.. 
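+# Assemble the initrd filesystem tree under build/initrd: the ros binary, images.tar, the Docker binaries, kernel modules, the SELinux policy, and the exported ${DFS_IMAGE} root filesystem.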
+ +BUILD=build +INITRD_DIR=${BUILD}/initrd + +echo Create initrd layout in $INITRD_DIR + +rm -rf ${INITRD_DIR} +mkdir -p ${INITRD_DIR}/usr/{etc,bin,share/ros} + +./scripts/template + +cp -rf assets/selinux ${INITRD_DIR}/usr/etc +cp build/images.tar ${INITRD_DIR}/usr/share/ros/ +cp bin/ros ${INITRD_DIR}/usr/bin/ +ln -s usr/bin/ros ${INITRD_DIR}/init +ln -s bin ${INITRD_DIR}/usr/sbin +ln -s usr/sbin ${INITRD_DIR}/sbin +ln -s ros ${INITRD_DIR}/usr/bin/system-docker + +tar xvzf ${DOWNLOADS}/docker.tgz -C ${INITRD_DIR}/usr/bin --strip-components=1 + +if [ -e ${DOWNLOADS}/kernel.tar.gz ]; then + mkdir -p ${BUILD}/kernel + tar xf ${DOWNLOADS}/kernel.tar.gz -C ${BUILD}/kernel + + for i in vmlinuz vmlinux; do + if [ -e ${BUILD}/kernel/boot/${i}-* ]; then + mkdir -p dist/artifacts + cp ${BUILD}/kernel/boot/${i}-* dist/artifacts/vmlinuz + break + fi + done + + if [ -d ${BUILD}/kernel/lib ]; then + rm -rf ${INITRD_DIR}/usr/lib + cp -rf ${BUILD}/kernel/lib ${INITRD_DIR}/usr/ + depmod -b ${INITRD_DIR}/usr $(basename ${INITRD_DIR}/usr/lib/modules/*) + fi +fi + +if [ -e ${DOWNLOADS}/policy.29 ]; then + mkdir -p ${INITRD_DIR}/usr/etc/selinux/ros/policy/ + cp ${DOWNLOADS}/policy.29 ${INITRD_DIR}/usr/etc/selinux/ros/policy/ +fi + +DFS_ARCH=$(docker create ${DFS_IMAGE}${SUFFIX}) +trap "docker rm -fv ${DFS_ARCH} >/dev/null" EXIT + +docker export ${DFS_ARCH} | tar xf - -C ${INITRD_DIR} --exclude=usr/bin/dockerlaunch \ + --exclude=usr/bin/docker \ + --exclude=usr/share/git-core \ + --exclude=usr/bin/git \ + --exclude=usr/bin/ssh \ + --exclude=usr/libexec/git-core \ + usr + +./scripts/hash-initrd diff --git a/scripts/make.sh b/scripts/make.sh deleted file mode 100755 index 4b84c0b0..00000000 --- a/scripts/make.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e - -cd $(dirname $0)/.. -. ./scripts/dapper-common - -dapper make HOST_ARCH=${HOST_ARCH} ARCH=${ARCH} "$@" diff --git a/scripts/mk-images-tar.sh b/scripts/mk-images-tar.sh deleted file mode 100755 index e3fe10b0..00000000 --- a/scripts/mk-images-tar.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -ex - -cd $(dirname $0)/.. -. scripts/build-common - -images="$(build/host_ros c images -i build/os-config.yml)" -for i in ${images}; do - [ "${FORCE_PULL}" != "1" ] && docker inspect $i >/dev/null 2>&1 || docker pull $i; -done - -docker save ${images} > ${BUILD}/images.tar diff --git a/scripts/mk-iso-checksums-txt.sh b/scripts/mk-iso-checksums-txt.sh deleted file mode 100755 index 3b34262a..00000000 --- a/scripts/mk-iso-checksums-txt.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -ex - -cd $(dirname $0)/.. -. scripts/build-common - -cd ${DIST}/artifacts -rm -f iso-checksums.txt || : - -for algo in 'sha256' 'md5'; do - echo "$algo: `${algo}sum rancheros.iso`" >> iso-checksums.txt; -done diff --git a/scripts/mk-kernel-tar-from-deb.sh b/scripts/mk-kernel-tar-from-deb.sh deleted file mode 100755 index c3e07eb7..00000000 --- a/scripts/mk-kernel-tar-from-deb.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -set -e -x - -# This script will convert an Ubuntu deb file to the kernel tar structure the RancherOS build expects -# For example -# -# ./scripts/mk-kernel-tar-from-deb.sh linux-image-3.19.0-28-generic_3.19.0-28.30_amd64.deb linux-image-extra-3.19.0-28-generic_3.19.0-28.30_amd64.deb linux-firmware_1.143.3_all.deb -# - -mkdir -p $(dirname $0)/../build -BUILD=$(mktemp -d $(dirname $0)/../build/deb-XXXXX) -mkdir -p $BUILD - -extract() -{ - if [ ! 
-e $1 ]; then - echo $1 does not exist - exit 1 - fi - - local deb=$(readlink -f $1) - - cd $BUILD - rm -f data.tar.* 2>/dev/null || true - ar x $deb - tar xvf data.tar.* - cd - -} - -for i in "$@"; do - extract $i -done - -cd $BUILD - -KVER=$(ls ./lib/modules) -depmod -b . $KVER - -echo Creating ${OLDPWD}/kernel.tar.gz -tar cvzf ${OLDPWD}/kernel.tar.gz ./lib boot/vmlinuz* -echo Created ${OLDPWD}/kernel.tar.gz - -cd - -rm -rf ${BUILD} diff --git a/scripts/mk-ros.sh b/scripts/mk-ros.sh deleted file mode 100755 index 70bd9731..00000000 --- a/scripts/mk-ros.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -ex - -ros="$1" - -ARCH=${ARCH:?"ARCH not set"} -VERSION=${VERSION:?"VERSION not set"} - -cd $(dirname $0)/.. - -strip_bin=$(which strip) -[ "${ARCH}" == "arm" ] && export GOARM=6 -if [ "${TOOLCHAIN}" != "" ]; then - export CC=/usr/bin/${TOOLCHAIN}-gcc - export CGO_ENABLED=1 - strip_bin=/usr/bin/${TOOLCHAIN}-strip -fi -GOARCH=${ARCH} go build -tags netgo -installsuffix netgo -ldflags "-X github.com/rancher/os/config.VERSION=${VERSION} -linkmode external -extldflags -static" -o ${ros} -${strip_bin} --strip-all ${ros} diff --git a/scripts/package b/scripts/package new file mode 100755 index 00000000..22ce0c76 --- /dev/null +++ b/scripts/package @@ -0,0 +1,13 @@ +#!/bin/bash +set -e + +cd $(dirname $0) + +if [ "$ROOTFS" != "0" ]; then + ./package-rootfs +fi +./package-initrd +./package-iso +if [ "$INSTALLER" != "0" ]; then + ./package-installer +fi diff --git a/scripts/package-initrd b/scripts/package-initrd new file mode 100755 index 00000000..84a6c84b --- /dev/null +++ b/scripts/package-initrd @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +BUILD=$(pwd)/build +INITRD_DIR=${BUILD}/initrd +ARTIFACTS=$(pwd)/dist/artifacts +INITRD=${ARTIFACTS}/initrd + +mkdir -p ${ARTIFACTS} + +if [ "$COMPRESS" == "" ]; then + COMPRESS=lzma +fi + +cd ${INITRD_DIR} + +echo Creating ${INITRD} +find | cpio -H newc -o | ${COMPRESS} > ${INITRD} +echo Done creating ${INITRD} diff --git a/scripts/package-installer b/scripts/package-installer new file mode 100755 index 00000000..d8a11753 --- /dev/null +++ b/scripts/package-installer @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +source ./scripts/version + +DOCKERFILE=./scripts/installer/Dockerfile.${ARCH} + +if [ ! -f $DOCKERFILE ] || [ ! -f dist/artifacts/vmlinuz ] || [ ! -f dist/artifacts/initrd ]; then + echo "Error: ${DOCKERFILE}, dist/artifacts/vmlinuz or dist/artifacts/initrd is missing" + exit 1 +fi + +mkdir -p ./scripts/installer/build +cp ./dist/artifacts/{initrd,vmlinuz} ./scripts/installer/build +echo VERSION=$VERSION > ./scripts/installer/build/build.conf +trap "rm -rf ./scripts/installer/build" EXIT + +docker build -t ${OS_REPO}/os:${VERSION}${SUFFIX} -f $DOCKERFILE ./scripts/installer +echo ${OS_REPO}/os:${VERSION}${SUFFIX} > dist/images +echo Built ${OS_REPO}/os:${VERSION}${SUFFIX} diff --git a/scripts/mk-rancheros-iso.sh b/scripts/package-iso similarity index 50% rename from scripts/mk-rancheros-iso.sh rename to scripts/package-iso index c36a3621..3eaff7b5 100755 --- a/scripts/mk-rancheros-iso.sh +++ b/scripts/package-iso @@ -1,23 +1,29 @@ #!/bin/bash -set -ex +set -e +source $(dirname $0)/version cd $(dirname $0)/.. -. scripts/build-common +ARTIFACTS=$(pwd)/dist/artifacts CD=${BUILD}/cd +mkdir -p ${CD}/boot/isolinux mkdir -p ${CD}/boot/isolinux -cp ${DIST}/artifacts/initrd ${CD}/boot -cp ${DIST}/artifacts/vmlinuz ${CD}/boot +if [ ! -f ${ARTIFACTS}/vmlinuz ] || [ ! 
-f ${ARTIFACTS}/initrd ]; then + echo "Error: vmlinuz or initrd is missing in ${ARTIFACTS}/" + exit 1 +fi + +cp ${ARTIFACTS}/initrd ${CD}/boot +cp ${ARTIFACTS}/vmlinuz ${CD}/boot cp scripts/isolinux.cfg ${CD}/boot/isolinux cp /usr/lib/ISOLINUX/isolinux.bin ${CD}/boot/isolinux cp /usr/lib/syslinux/modules/bios/ldlinux.c32 ${CD}/boot/isolinux cd ${CD} && xorriso \ - -publisher "Rancher Labs, Inc." \ -as mkisofs \ - -l -J -R -V "RancherOS" \ + -l -J -R -V "${DISTRIB_ID}" \ -no-emul-boot -boot-load-size 4 -boot-info-table \ -b boot/isolinux/isolinux.bin -c boot/isolinux/boot.cat \ -isohybrid-mbr /usr/lib/ISOLINUX/isohdpfx.bin \ - -o ${DIST}/artifacts/rancheros.iso ${CD} + -o ${ARTIFACTS}/$(echo ${DISTRIB_ID} | tr '[:upper:]' '[:lower:]').iso ${CD} diff --git a/scripts/package-rootfs b/scripts/package-rootfs new file mode 100755 index 00000000..68c10c3c --- /dev/null +++ b/scripts/package-rootfs @@ -0,0 +1,33 @@ +#!/bin/bash +set -e +set -o pipefail + +cd $(dirname $0)/.. + +source scripts/version + +BUILD=$(pwd)/build +IMAGE_CACHE=${BUILD}/image-cache +PREPOP_DIR=${IMAGE_CACHE}/var/lib/system-docker +INITRD_DIR=${BUILD}/initrd +ARTIFACTS=$(pwd)/dist/artifacts +INITRD=${ARTIFACTS}/initrd + +mkdir -p ${ARTIFACTS} ${PREPOP_DIR} + +if [ "$(docker info | grep 'Storage Driver: ' | sed 's/Storage Driver: //')" != "overlay" ]; then + echo Overlay storage driver is required to create rootfs.tar 1>&2 + exit 1 +fi + +DFS=$(docker run -d --privileged -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) ${DFS_IMAGE}${SUFFIX} ${DFS_ARGS}) +trap "docker rm -fv ${DFS}" EXIT +docker exec -i ${DFS} docker load < ${INITRD_DIR}/usr/share/ros/images.tar +docker stop ${DFS} +docker run --rm --volumes-from=${DFS} rancher/os-base tar -c -C /var/lib/docker ./image | tar -x -C ${PREPOP_DIR} +docker run --rm --volumes-from=${DFS} rancher/os-base tar -c -C /var/lib/docker ./overlay | tar -x -C ${PREPOP_DIR} + +tar -cf ${ARTIFACTS}/rootfs.tar --exclude lib/modules --exclude lib/firmware -C ${INITRD_DIR} . +tar -rf ${ARTIFACTS}/rootfs.tar -C ${IMAGE_CACHE} . +rm -f ${ARTIFACTS}/rootfs.tar.gz +gzip ${ARTIFACTS}/rootfs.tar diff --git a/scripts/prepare b/scripts/prepare new file mode 100755 index 00000000..a05c292b --- /dev/null +++ b/scripts/prepare @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +source $(dirname $0)/version + +cd $(dirname $0) + +if [ ! -e ../bin/host_ros ]; then + ./build +fi + +./template +./build-images +./tar-images +./layout diff --git a/scripts/release b/scripts/release new file mode 100755 index 00000000..80a80a63 --- /dev/null +++ b/scripts/release @@ -0,0 +1,6 @@ +#!/bin/bash + +source $(dirname $0)/version +export REPO_VERSION=$VERSION + +exec $(dirname $0)/ci diff --git a/scripts/run b/scripts/run index 4baee049..01086b41 100755 --- a/scripts/run +++ b/scripts/run @@ -1,10 +1,9 @@ #!/bin/bash set -e +# help: Build and run RancherOS locally (requires KVM) cd $(dirname $0)/.. -source scripts/build-common - HOST_ARCH=${HOST_ARCH:-amd64} ARCH=${ARCH:-amd64} @@ -34,11 +33,13 @@ cd_arm() { } declare -A cd=( ["amd64"]="cd_amd64" ["arm"]="cd_arm" ["arm64"]="cd_arm" ) +BUILD=build BASE=$(pwd) UNAME=$(uname) KERNEL=${BASE}/dist/artifacts/vmlinuz -INITRD=${BASE}/dist/artifacts/initrd +INITRD_SRC=${BASE}/build/initrd +INITRD=${BASE}/build/initrd.tmp QEMU=1 FORMAT=1 @@ -103,42 +104,17 @@ if [ "$QEMU" == "1" ] && ! which qemu-system-${QEMUARCH}; then QEMU=0 fi -if [[ ! -e ${KERNEL} || ! 
-e ${INITRD} ]]; then - echo "Failed to find ${KERNEL} or ${INITRD}" 1>&2 +if [[ ! -e ${KERNEL} || ! -e ${INITRD_SRC} ]]; then + echo "Failed to find ${KERNEL} or ${INITRD_SRC}" 1>&2 exit 1 fi -# Linux and Darwin SHA1 sum binary are different, pick which to use -if [ "$UNAME" == "Darwin" ]; then sha1sum=$(which shasum) -elif [ "$UNAME" == "Linux" ]; then sha1sum=$(which sha1sum); -fi - -if [ "$REBUILD" == "1" ]; then - INITRD_TMP=${BUILD}/$(${sha1sum} ${INITRD} | awk '{print $1}') - INITRD_CURRENT=${BUILD}/initrd-current - INITRD_TEST=${BUILD}/initrd.test - - if [ ! -d ${INITRD_TMP} ]; then - mkdir -p ${INITRD_TMP} - pushd ${INITRD_TMP} - xz -dfc ${INITRD} | gunzip -f | cpio -idm - popd - fi - - if [ -e ${INITRD_CURRENT} ]; then - rm -f ${INITRD_CURRENT} - fi - - ln -s ${INITRD_TMP} ${INITRD_CURRENT} - - mkdir -p ${INITRD_TMP}/usr/{bin,share/ros} - cp bin/ros ${INITRD_TMP}/usr/bin/ - cp -f build/os-config.yml ${INITRD_TMP}/usr/share/ros/ - - pushd ${INITRD_TMP} - find . | cpio -H newc -o | gzip > ${INITRD_TEST} - popd - INITRD=${INITRD_TEST} +if [ "$REBUILD" == "1" ] || [ ! -e ${INITRD} ]; then + cp bin/ros ${INITRD_SRC}/usr/bin/ros + ./scripts/hash-initrd + pushd ${INITRD_SRC} >/dev/null + find . | cpio -H newc -o | gzip -1 > ${INITRD} + popd >/dev/null fi KERNEL_ARGS="quiet rancher.password=rancher console=${TTYCONS} ${QEMU_APPEND}" @@ -177,27 +153,13 @@ if [ "$QEMU" == "1" ]; then echo "- $(<${BASE}/assets/rancher.key.pub)" >> ${USER_DATA} fi - CCIMG=${BUILD}/cc.img - CCDEV=/dev/loop$(uuid) - mknod $CCDEV b 7 0 - dd if=/dev/zero of=${CCIMG} bs=1024 count=512 - losetup ${CCDEV} ${CCIMG} - mkfs.vfat -n "config-2" ${CCDEV} - - CCROOT_TMP=$(mktemp -d) - mount ${CCDEV} ${CCROOT_TMP} - cp -a ${CCROOT}/* ${CCROOT_TMP}/ - umount ${CCROOT_TMP} && rmdir ${CCROOT_TMP} - losetup -d ${CCDEV} - rm ${CCDEV} - - CPU=${cpu["$ARCH"]} if [ "$KVM" == "1" ] && [ "$ARCH" == "$HOST_ARCH" ]; then KVM_ENABLE="-enable-kvm" CPU="-cpu host" fi set -x + HOME=${HOME:-/} exec qemu-system-${QEMUARCH} -serial stdio \ -rtc base=utc,clock=host \ ${KVM_ENABLE} \ @@ -207,12 +169,15 @@ if [ "$QEMU" == "1" ]; then -initrd ${INITRD} \ -m 2048 \ ${network["$ARCH"]} \ - $(eval "${cd["$ARCH"]} ${CCIMG}") \ $(eval "${hd["$ARCH"]} ${HD}") \ -smp 1 \ -append "${KERNEL_ARGS}" \ -nographic \ -display none \ + -fsdev local,security_model=passthrough,readonly,id=fsdev0,path=${CCROOT} \ + -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=config-2 \ + -fsdev local,security_model=none,id=fsdev1,path=${HOME} \ + -device virtio-9p-pci,id=fs1,fsdev=fsdev1,mount_tag=home \ ${QEMU_ARGS} \ "${@}" diff --git a/scripts/shell b/scripts/shell new file mode 100755 index 00000000..811f0cfd --- /dev/null +++ b/scripts/shell @@ -0,0 +1,4 @@ +#!/bin/bash +# help: Launch shell in build environment + +exec bash diff --git a/scripts/shell.sh b/scripts/shell.sh deleted file mode 100755 index f171e10e..00000000 --- a/scripts/shell.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e - -cd $(dirname $0)/.. -. ./scripts/dapper-common - -exec dapper -d -s diff --git a/scripts/tar-images b/scripts/tar-images new file mode 100755 index 00000000..93b3afdd --- /dev/null +++ b/scripts/tar-images @@ -0,0 +1,13 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +IMAGES=$(bin/host_ros c images -i build/initrd/usr/share/ros/os-config.yml) +for i in $IMAGES; do + if [ "${FORCE_PULL}" = "1" ] || ! 
docker inspect $i >/dev/null 2>&1; then + docker pull $i + fi +done + +docker save ${IMAGES} > build/images.tar diff --git a/scripts/template b/scripts/template new file mode 100755 index 00000000..ad41a5ab --- /dev/null +++ b/scripts/template @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +source $(dirname $0)/version + +cd $(dirname $0)/.. + +OUTPUT=build/initrd/usr/share/ros +mkdir -p $OUTPUT +./bin/host_ros c generate < os-config.tpl.yml > $OUTPUT/os-config.yml diff --git a/scripts/test b/scripts/test index 222dd5db..e188270f 100755 --- a/scripts/test +++ b/scripts/test @@ -1,17 +1,14 @@ #!/bin/bash +# help: Run go unit tests set -e -if [[ ! -x "$(which go)" && -x /usr/local/go/bin/go ]]; then - PATH=/usr/local/go/bin:${PATH} -fi - cd $(dirname $0)/.. -result=$(find . -name "*.go" | grep -v ./Godeps | xargs gofmt -l) -for i in $result; do - echo $i -done +echo Running tests -[ -n "$result" ] && exit 1 +PACKAGES=". $(find -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | grep -Ev '(^\.$|.git|.trash-cache|vendor|bin)' | sed -e 's!^!./!' -e 's!$!/...!')" -echo OK +if [ "$ARCH" = "amd64" ]; then + RACE="-race" +fi +go test $RACE -cover -tags=test ${PACKAGES} diff --git a/scripts/unit-test b/scripts/unit-test deleted file mode 100755 index 6e8e1ab9..00000000 --- a/scripts/unit-test +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e - -cd $(dirname $0)/.. - -PACKAGES="$(find -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | grep -Ev '(^\.$|.git|.trash-cache|vendor)' | sed -e 's!^!./!' -e 's!$!/...!')" - -go test -race -cover -tags=test $PACKAGES diff --git a/scripts/validate b/scripts/validate new file mode 100755 index 00000000..17f8372e --- /dev/null +++ b/scripts/validate @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +echo Running validation + +PACKAGES=". $(find -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | grep -Ev '(^\.$|.git|.trash-cache|vendor|bin)' | sed -e 's!^!./!' -e 's!$!/...!')" + +echo Running: go vet +go vet ${PACKAGES} +echo Running: golint +for i in ${PACKAGES}; do + if [ -n "$(golint $i | grep -v 'should have comment.*or be unexported' | tee /dev/stderr)" ]; then + failed=true + fi +done +test -z "$failed" +echo Running: go fmt +test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)" diff --git a/scripts/vendor-cleanup.sh b/scripts/vendor-cleanup.sh deleted file mode 100755 index bddb5b99..00000000 --- a/scripts/vendor-cleanup.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -set -e - -cd $(dirname $0)/.. 
- -package=$(go list) -prefix="${package}/vendor" -require="github.com/stretchr/testify/require" # the only test import - -imports=( ) -importsLen=${#imports[@]} - -collectImports() { - imports=( $(GOOS=linux go list -f '{{join .Deps "\n"}}' | egrep "^${prefix}/" | sed s%"^${package}.*/vendor/"%./vendor/%) ) - imports=( - "${imports[@]}" "./vendor/${require}" - $(GOOS=linux go list -f '{{join .Deps "\n"}}' "${prefix}/${require}" | egrep "^${prefix}/" | sed s%"^${package}.*/vendor/"%./vendor/%) - ) - echo importsLen: $importsLen - echo collected imports: ${#imports[@]} -} - -nonImports() { - while read path; do - skip=0 - for i in "${imports[@]}"; do - [[ "${i}" == "${path}" || ${i} = ${path}/* ]] && skip=1 && break - done - [ "$skip" == "0" ] && echo ${path} - done -} - -collectImports - -while [ ${#imports[@]} != ${importsLen} ]; do - importsLen=${#imports[@]} - echo '=====> Collected imports' - for i in "${imports[@]}"; do - echo ${i} - done - - echo '=====> Removing unused packages' - find ./vendor -type d | nonImports | xargs -I{} rm -rf {} - - echo '=====> Removing empty dirs' - emptyDirs=( $(find ./vendor -type d -empty) ) - while [ ${#emptyDirs[@]} -gt 0 ]; do - rmdir ${emptyDirs[@]} - emptyDirs=( $(find ./vendor -type d -empty) ) - done - - collectImports -done - -echo '=====> Done!' diff --git a/scripts/version b/scripts/version new file mode 100755 index 00000000..f99dbde8 --- /dev/null +++ b/scripts/version @@ -0,0 +1,24 @@ +#!/bin/bash + +if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + DIRTY="-dirty" +fi + +COMMIT=$(git rev-parse --short HEAD) +GIT_TAG=$(git tag -l --contains HEAD | head -n 1) + +if [ -z "$VERSION" ]; then + if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then + VERSION=$GIT_TAG + else + VERSION="${COMMIT}${DIRTY}" + fi +fi + +export VERSION COMMIT + +# Suffix +export SUFFIX="" +if [ -n "${ARCH}" ] && [ "${ARCH}" != "amd64" ]; then + SUFFIX="_${ARCH}" +fi diff --git a/tests/integration/assets/test_01/cloud-config.yml b/tests/integration/assets/test_01/cloud-config.yml index cf05068d..63d0d0d0 100644 --- a/tests/integration/assets/test_01/cloud-config.yml +++ b/tests/integration/assets/test_01/cloud-config.yml @@ -15,6 +15,5 @@ rancher: mtu: 1500 docker: args: [daemon, --log-opt, max-file=2, --log-opt, max-size=25m, -s, overlay, -G, docker, -H, 'unix:///var/run/docker.sock', --userland-proxy=false] - tls: true ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUlsWAL5Rf0Wis/A7k7Tlqx0fZS60VzCZrPZYbP/wkL95jv0XzCx8bd1rZHeybblHPDNpND3BLv4qPY5DxRyexF4seGuzcJI/pOvGUGjQondeMPgDTFEo5w939gSdeTZcfXzQ0wAVhzwDbgH4zPfMzbdoo8Aiu9jkKljXw8IFju0gh+t6iKkGZCIjKT9o7zza1vGfkodhvi2V3VzPdNO28gaxZaRNtmBYUoVnGyR6nXN1Q3CJaVuh5o6GPCOqrhHNbYOFZKBpDiHbxPhVpxHQD2+8yUSGTG7WW75FfZePja5y8d0c/O5L37ZYx4AZAd3KgQYDBT2XCEJGQNawNbfpt diff --git a/tests/integration/assets/test_02/cloud-config.yml b/tests/integration/assets/test_02/cloud-config.yml new file mode 100644 index 00000000..af7e26d2 --- /dev/null +++ b/tests/integration/assets/test_02/cloud-config.yml @@ -0,0 +1,102 @@ +#cloud-config +rancher: + docker: + tls: true + ca_cert: |+ + -----BEGIN CERTIFICATE----- + MIIC0TCCAbmgAwIBAgIQEMQoBLQ2IMOqlCeG7l8+fzANBgkqhkiG9w0BAQsFADAS + MRAwDgYDVQQKEwdyYW5jaGVyMB4XDTE2MDYwNjE2MTYwMFoXDTE5MDUyMjE2MTYw + MFowEjEQMA4GA1UEChMHcmFuY2hlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC + AQoCggEBAND9PyJVU47CNsA5AjByvEv0KkD106JGHkQc+8lzVyTZw+TV6AvQm+Gt + jiYTgWzL+aGQXFDAK8EDSPzo0koNcFHJeQAJnCULZzm5irqwKZSMlDZSCRO0bJsm + CVpJpYlAc4wHb05nGtR3WB/XvudNWi9HuAZta7JAZ41LXCpC1VZ+K7EbSMsud1/w + 
86nkqEU4FeiEbObiKUWS1sQSEs9mmaVg1qaFvorQEREyfXHl+ngwA7tlbl8pF3NS + Ti1Uod746LUSoO2ZmNgmrONsOwl8GYjZNGz+q1YcqeiD9G78rd5gG9uPvEPM89Zm + pGM4iNE/NYMcWv2WcYx0qC9rLR1GwQ8CAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKs + MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEmCbU+l9JilTEvF + L0bLFV3XXfN/YaC5tD3K5J2ReOGQcKuZodlpXJpYg/QbcdMbn+N58VuKtIiphU9l + oLbJx0a9mbN9PSuzOo4Ln24SVfEEAZI39BdgMH5LiTLE/7KXgqqPoCLk7dWOkiOn + QTNCJgP84PsMXcXnkZ2bcQjApvQ99kMqcl/TL9bBLCzGC0ZoG+G9dnzHjDd2WbKg + k/3gGJo6vWZD1WOYwxWAqtFEw2iWYRXmAQ1AqzJT83dLpUt1Dh3yP7/p7LAC/s1c + xW0SpP/lE1MjNc2eWAdpEgvBT6ry3WzLekaBgCRlVpkb/rCpWQp3Ocwyoj04qMdC + d59A+HI= + -----END CERTIFICATE----- + ca_key: |+ + -----BEGIN RSA PRIVATE KEY----- + MIIEpAIBAAKCAQEA0P0/IlVTjsI2wDkCMHK8S/QqQPXTokYeRBz7yXNXJNnD5NXo + C9Cb4a2OJhOBbMv5oZBcUMArwQNI/OjSSg1wUcl5AAmcJQtnObmKurAplIyUNlIJ + E7RsmyYJWkmliUBzjAdvTmca1HdYH9e+501aL0e4Bm1rskBnjUtcKkLVVn4rsRtI + yy53X/DzqeSoRTgV6IRs5uIpRZLWxBISz2aZpWDWpoW+itARETJ9ceX6eDADu2Vu + XykXc1JOLVSh3vjotRKg7ZmY2Cas42w7CXwZiNk0bP6rVhyp6IP0bvyt3mAb24+8 + Q8zz1makYziI0T81gxxa/ZZxjHSoL2stHUbBDwIDAQABAoIBAQCo+aobW3w0+CkG + oNF5VLuUefXUEi8sjJ8aGYknZ7+1BvHRy3ZUXzY6cXZ2qNzDl+Td0fgiPk7iP4K7 + IpAs2dLP/iN8eUir1x1+WdumeJsWBdgsV4YJTZ9mjomPW+6hG+CQ/s3rSYgy88/n + 5yvunudlRQqw/7XNKS/Q2XbKoMEXrXMF4yuMzhFfajw3c9boLJYpLArwau3b4UAq + Zx1tDDs5jSiCTdjySDfsbju59Fx68Pb6edeOUhKlNp22MlLRFwKPYEUI/6PCLA7h + sIoL1c3UEH4Tl64e4TgP9kIVlpB6s55cPkFnfm/XvJ14ipLnFhC3NUAWr5iNIxEi + vjP/vbgxAoGBAPnS1SYkgBsMy1BBPiglJu1OrSQsG8JVRtAfDbNGxpF5jcbjOAQV + RWqWrnVvpWt34B0cohKiM0F1YgjPUsy2fEgLr6YTk5ZCxBk4PnJqOfwpkygP7KGR + VHgJNdiX4SPTDjy1roZWnZvfxrHwKTRIhYY3VoCWMFGLYlzMEukWM++3AoGBANYn + 99CWrpnxvhBuU6dKqoSwf43QOyCPDKU0uqtahw2n2BhrDO+gM0IFPh5Mi7rWhmWn + er2VXZrwXJTxUxLrCO/N68IzJp8uxEDr1mS+vTDiz5ix2+pr1BbolZOLHpUipi9x + atG1oIM8Sw6kvl8tyHvQQQNlmTHD0s51joat1AlpAoGBAOf36W0aVU1IqvxhKEr0 + fFm7RS+iOUBQGImlW/5MSJLJ0GiNkPTRn3wiX+mxemL4k0PU03UD4R311cqiX5qw + E2R+XWGTKeZLJnTYcbuhgSfwnrCDYNCA9nLi8nmkRSwTjFO4y0333S7gMUoF2uyu + LjV66rpJOqJtDy9lWmXN9PmvAoGATqGINRdObom7To8jufYJXATuIKTHQPIlI3eT + 3pyzn8jz6CtOKaG5kFEaeMeEOorP9/0hbQCtyNjeNXXSGc0gj+Qc30YmtSXXuzqc + kosSLiPpM1iCtbT3v52QZgcbqIh7WkobfXphwC3gJTVKDOpjhUp2xIeGUyZifZne + RgcHJpkCgYA4yfNbzKPKF4sGp7CNKPnpAsE6LmK0kWBfQQAGbe7IlD90DFLgH9vz + 9erDf1oX4lrnkTtbNxbW1jrMSaAjXS6PyOr6/Qd6XoCgpcEv83Kf7/A0SUuaWTJk + yeXy5mu3kosqk+GKvaqSzVlmJG0O6awbG1BFK51xWq1LImmVSkwjjw== + -----END RSA PRIVATE KEY----- + server_cert: |+ + -----BEGIN CERTIFICATE----- + MIIDDTCCAfWgAwIBAgIRAMiHeN7t0NKghK3RSflXZ5MwDQYJKoZIhvcNAQELBQAw + EjEQMA4GA1UEChMHcmFuY2hlcjAeFw0xNjA2MDYxNjE2MDBaFw0xOTA1MjIxNjE2 + MDBaMBIxEDAOBgNVBAoTB3JhbmNoZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw + ggEKAoIBAQD8iRzgcfhfUuurgEzefUIMeHJRu5OF1ILsekYpJKWesYhhvA47BC1+ + Nm96BLbfmpp5A7F+ZNQvmO8tNBnnHXWPVbeBmDayLWRhzRkDbPWRx4q9ciIhUsNe + iAeF+iAVJ+S7XTFnRPY7NS+boisuaNG1ecA4XIH/dRmd50DfGfvv6Ntv22ffV1pA + 2vmqIT0O19Bw60jIB7UJSUFofPmpo60TJH7wFusqwttCXjbHbOz/+iKP+eKLksMa + 6oYdwd+hZyHqNMCDDEryQjsnUW9+1IoVattaa/2Y+/aWaczNzbcI2xcrG76lHnD7 + Gqj8rthzv+0XP63cq5dG/KIyo11TcEXNAgMBAAGjXjBcMA4GA1UdDwEB/wQEAwID + qDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAd + BgNVHREEFjAUgglsb2NhbGhvc3SCASqHBH8AAAEwDQYJKoZIhvcNAQELBQADggEB + AHVIh+WjfMM71PJz+fEdAIhWaxKpv9x27ZcWkReIprxdO+2s7ltZgyFpZgAGn5Zo + TSMfKkuCbjni0j+dhgWzrExVDF0sbyyYmnpskykA1lC4CLuPvdrXt55Kje/ZUmPO + B7vfWjDeae+p3iZ9sXRcJTNhIO2GtA+gKE+9PkNRG1X9H2EOvVkZoDDaxaMZVU85 + XteKadiTwSiKkopRyyM9uhOPVg1nFtUcC4M+p1NgdSqp2gccpf6fLYocnrJrvuaG + ci0e+nDcgYYXGj9Cl1OxDA0QIWuhWBKZPdS4RNwu8boONMqM+CGe8CibbFKBEt2R + ZiC3i7FAxsmSVLItyaRB8EI= + -----END 
CERTIFICATE----- + server_key: |+ + -----BEGIN RSA PRIVATE KEY----- + MIIEpQIBAAKCAQEA/Ikc4HH4X1Lrq4BM3n1CDHhyUbuThdSC7HpGKSSlnrGIYbwO + OwQtfjZvegS235qaeQOxfmTUL5jvLTQZ5x11j1W3gZg2si1kYc0ZA2z1kceKvXIi + IVLDXogHhfogFSfku10xZ0T2OzUvm6IrLmjRtXnAOFyB/3UZnedA3xn77+jbb9tn + 31daQNr5qiE9DtfQcOtIyAe1CUlBaHz5qaOtEyR+8BbrKsLbQl42x2zs//oij/ni + i5LDGuqGHcHfoWch6jTAgwxK8kI7J1FvftSKFWrbWmv9mPv2lmnMzc23CNsXKxu+ + pR5w+xqo/K7Yc7/tFz+t3KuXRvyiMqNdU3BFzQIDAQABAoIBAQDUiMDY1JJoB214 + ZP5MsbaIsEXmK6u4kmWtiCrMLZ+Fs3xIZPDFEnsHIeEoHBeckI05E7ap3UoG1PtW + W+cA73YlL8rFMsm1oyY8eVR34Ze7HOjTD99RlEoAoRNT2nJt26lRVtlhRFTG97gd + j5ov8N+tj84KoTB3QqJQlnOuUDwMZ41roFOLEooXSA00qDFENlpBQsEtW+3Ga5ut + INH0CQnynIqt05p24oGxaLjrnmqbEhhJtAyGNHgIJAGoEmPwtPkPcd56QDnoO1wk + 4fBiHgdqUfj21rBFgsIuW3NGzHvtJXopS8kaR3NaIIBCfRxxytLgtNNNW77W54ig + MvuJZr0BAoGBAP3VnIGlVZrIBzsWgPKWyPZ2s9jVR5Ub19x+6wwaGqyH19mQaGxw + x1Wic/F4dF4qqoCwyRRkpyHmGPfChgYMEp0jff5MD2Q4JfxEtGP9agFOV0cJkJr6 + pY3zSbsmft+K4NhEBVMAQfFgb5mb5eSEWl+SI/jn6ee0PyvcI2LzXZVxAoGBAP6w + qiy9wbXFtzA7RC2sBlGFyZekC81DTVSIv586kMDY8oqg34Z07s1JuQYvIlavJ9lw + e50vLW8h3O2r4dge7v6CKAlbeaRtaQfpXJRezH9YQJ9lTJoXY6W7LbnBAPcexFps + J/2rul4RSLUZPuLSpGAcYall77o3rwn9oOocL9gdAoGBAJh/Vhh5iRWFaPqxyWR0 + /GU96Uyyzd+iK6x3v6S0piPTNPWrkWDc3JTxFXET6e2M+oR4MUYENnjiMUvgXP6T + EDfB0/cMIQ8XwJJvgGS2IZKJS1wNPggt33qJmFlMhlqsp+ql6wDznapzQnjptVL1 + xQm31c67HcarfmxORCA1j5qxAoGAIeJBEajBL7y5LWqFHIppYMkq08jYZRuSGzAC + Rl3VSkLSqczTUCEYcClhu0fkCqJM6+nCGFxhcAqSSPB4IHelFikcyHnqCg0gTxQl + 4/tku4BzQIGKmmmIMVFguPjLdxUZzGndPCtvpPopsSZFromVos/D0nSkWyLzX6Hl + mZ/cYaUCgYEA2xufKaPgEdYcuI1rYKHWvhb0DI6QRK/0EfPbo6jkSdpYPBXfJ61F + JkGZVxqZI7r/U55RMmdgCpRAZCISCOm0jepaR32v5Ckan88UTyu8EuXNhvMK2jWP + DJ+16tv9ZXGwkAVJNpVv0Ze6E4yiNZz3Nuq16nBR5QeQmQSGOwbMRRI= + -----END RSA PRIVATE KEY----- + +ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUlsWAL5Rf0Wis/A7k7Tlqx0fZS60VzCZrPZYbP/wkL95jv0XzCx8bd1rZHeybblHPDNpND3BLv4qPY5DxRyexF4seGuzcJI/pOvGUGjQondeMPgDTFEo5w939gSdeTZcfXzQ0wAVhzwDbgH4zPfMzbdoo8Aiu9jkKljXw8IFju0gh+t6iKkGZCIjKT9o7zza1vGfkodhvi2V3VzPdNO28gaxZaRNtmBYUoVnGyR6nXN1Q3CJaVuh5o6GPCOqrhHNbYOFZKBpDiHbxPhVpxHQD2+8yUSGTG7WW75FfZePja5y8d0c/O5L37ZYx4AZAd3KgQYDBT2XCEJGQNawNbfpt diff --git a/tests/integration/assets/test_03/cloud-config.yml b/tests/integration/assets/test_03/cloud-config.yml index 43d347f9..839f7782 100644 --- a/tests/integration/assets/test_03/cloud-config.yml +++ b/tests/integration/assets/test_03/cloud-config.yml @@ -1,6 +1,5 @@ #cloud-config rancher: - services_include: - debian-console: true + console: debian ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUlsWAL5Rf0Wis/A7k7Tlqx0fZS60VzCZrPZYbP/wkL95jv0XzCx8bd1rZHeybblHPDNpND3BLv4qPY5DxRyexF4seGuzcJI/pOvGUGjQondeMPgDTFEo5w939gSdeTZcfXzQ0wAVhzwDbgH4zPfMzbdoo8Aiu9jkKljXw8IFju0gh+t6iKkGZCIjKT9o7zza1vGfkodhvi2V3VzPdNO28gaxZaRNtmBYUoVnGyR6nXN1Q3CJaVuh5o6GPCOqrhHNbYOFZKBpDiHbxPhVpxHQD2+8yUSGTG7WW75FfZePja5y8d0c/O5L37ZYx4AZAd3KgQYDBT2XCEJGQNawNbfpt diff --git a/tests/integration/assets/test_05/cloud-config.yml b/tests/integration/assets/test_05/cloud-config.yml index 43d347f9..839f7782 100644 --- a/tests/integration/assets/test_05/cloud-config.yml +++ b/tests/integration/assets/test_05/cloud-config.yml @@ -1,6 +1,5 @@ #cloud-config rancher: - services_include: - debian-console: true + console: debian ssh_authorized_keys: - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDUlsWAL5Rf0Wis/A7k7Tlqx0fZS60VzCZrPZYbP/wkL95jv0XzCx8bd1rZHeybblHPDNpND3BLv4qPY5DxRyexF4seGuzcJI/pOvGUGjQondeMPgDTFEo5w939gSdeTZcfXzQ0wAVhzwDbgH4zPfMzbdoo8Aiu9jkKljXw8IFju0gh+t6iKkGZCIjKT9o7zza1vGfkodhvi2V3VzPdNO28gaxZaRNtmBYUoVnGyR6nXN1Q3CJaVuh5o6GPCOqrhHNbYOFZKBpDiHbxPhVpxHQD2+8yUSGTG7WW75FfZePja5y8d0c/O5L37ZYx4AZAd3KgQYDBT2XCEJGQNawNbfpt diff --git a/tests/integration/assets/test_10/cloud-config.yml b/tests/integration/assets/test_10/cloud-config.yml index a42d3e9d..e4fc4807 100644 --- a/tests/integration/assets/test_10/cloud-config.yml +++ b/tests/integration/assets/test_10/cloud-config.yml @@ -2,4 +2,4 @@ rancher: cloud_init: datasources: - - url:https://s3-us-west-2.amazonaws.com/rancher-os-test/cloud-config.yml + - url:https://gist.githubusercontent.com/joshwget/0bdc616cd26162ad87c535644c8b1ef6/raw/8cce947c08cf006e932b71d92ddbb96bae8e3325/gistfile1.txt diff --git a/tests/integration/assets/test_18/cloud-config.yml b/tests/integration/assets/test_18/cloud-config.yml new file mode 100644 index 00000000..155a1d98 --- /dev/null +++ b/tests/integration/assets/test_18/cloud-config.yml @@ -0,0 +1,10 @@ +#cloud-config +rancher: + console: debian + services: + missing_image: + image: tianon/true + labels: + io.rancher.os.scope: system +ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC85w9stZyiLQp/DkVO6fqwiShYcj1ClKdtCqgHtf+PLpJkFReSFu8y21y+ev09gsSMRRrjF7yt0pUHV6zncQhVeqsZtgc5WbELY2DOYUGmRn/CCvPbXovoBrQjSorqlBmpuPwsStYLr92Xn+VVsMNSUIegHY22DphGbDKG85vrKB8HxUxGIDxFBds/uE8FhSy+xsoyT/jUZDK6pgq2HnGl6D81ViIlKecpOpWlW3B+fea99ADNyZNVvDzbHE5pcI3VRw8u59WmpWOUgT6qacNVACl8GqpBvQk8sw7O/X9DSZHCKafeD9G5k+GYbAUz92fKWrx/lOXfUXPS3+c8dRIF diff --git a/tests/integration/assets/test_19/cloud-config.yml b/tests/integration/assets/test_19/cloud-config.yml new file mode 100644 index 00000000..88715293 --- /dev/null +++ b/tests/integration/assets/test_19/cloud-config.yml @@ -0,0 +1,6 @@ +#cloud-config +rancher: + services_include: + https://gist.githubusercontent.com/joshwget/a8dd813f35cee3cf562de4454217a533/raw/5a991bb8c37bf2a1a405f5a898231d925a78ad3d/gistfile1.txt: true +ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC85w9stZyiLQp/DkVO6fqwiShYcj1ClKdtCqgHtf+PLpJkFReSFu8y21y+ev09gsSMRRrjF7yt0pUHV6zncQhVeqsZtgc5WbELY2DOYUGmRn/CCvPbXovoBrQjSorqlBmpuPwsStYLr92Xn+VVsMNSUIegHY22DphGbDKG85vrKB8HxUxGIDxFBds/uE8FhSy+xsoyT/jUZDK6pgq2HnGl6D81ViIlKecpOpWlW3B+fea99ADNyZNVvDzbHE5pcI3VRw8u59WmpWOUgT6qacNVACl8GqpBvQk8sw7O/X9DSZHCKafeD9G5k+GYbAUz92fKWrx/lOXfUXPS3+c8dRIF diff --git a/tests/integration/rostest/test_00_system.py b/tests/integration/rostest/test_00_system.py index 23cefb1f..ee0a5194 100644 --- a/tests/integration/rostest/test_00_system.py +++ b/tests/integration/rostest/test_00_system.py @@ -13,9 +13,7 @@ def qemu(request): @pytest.mark.timeout(30) def test_system_boot(qemu): - version = u.rancheros_version('./build.conf') - print('parsed version: ' + version) - u.flush_out(qemu.stdout, 'RancherOS {v} started'.format(v=version)) + u.flush_out(qemu.stdout) busybox = {'amd64': 'busybox', 'arm': 'armhf/busybox', 'arm64': 'aarch64/busybox'} diff --git a/tests/integration/rostest/test_01_cloud_config.py b/tests/integration/rostest/test_01_cloud_config.py index 87ed91d5..b0a88c51 100644 --- a/tests/integration/rostest/test_01_cloud_config.py +++ b/tests/integration/rostest/test_01_cloud_config.py @@ -72,7 +72,10 @@ def test_services_include(qemu, cloud_config): def test_docker_tls_args(qemu, cloud_config): SSH(qemu, ssh_command).check_call(''' set -e -x +sudo ros 
tls gen --server -H localhost sudo ros tls gen +sudo ros c set rancher.docker.tls true +sudo system-docker restart docker sleep 5 docker --tlsverify version '''.strip()) @@ -88,10 +91,13 @@ ip route get to 10.10.2.120 cloud_config['rancher']['network']['interfaces']['mac=52:54:00:12:34:59']['address'] -def test_docker_not_pid_one(qemu): +def test_docker_pid_one(qemu): SSH(qemu, ssh_command).check_call(''' set -e -x for i in $(pidof docker); do - [ $i != 1 ] + if [ $i = 1 ]; then + found=true + fi done +[ "$found" = "true" ] '''.strip()) diff --git a/tests/integration/rostest/test_02_tls.py b/tests/integration/rostest/test_02_tls.py new file mode 100644 index 00000000..b5c7bf19 --- /dev/null +++ b/tests/integration/rostest/test_02_tls.py @@ -0,0 +1,22 @@ +import pytest +import rostest.util as u +from rostest.util import SSH + +ssh_command = ['./scripts/ssh', '--qemu', '--key', './tests/integration/assets/test.key'] +cloud_config_path = './tests/integration/assets/test_02/cloud-config.yml' + + +@pytest.fixture(scope="module") +def qemu(request): + q = u.run_qemu(request, ['--cloud-config', cloud_config_path]) + u.flush_out(q.stdout) + return q + + +@pytest.mark.timeout(40) +def test_docker_tls_args(qemu): + SSH(qemu, ssh_command).check_call(''' +set -e -x +sudo ros tls gen +docker --tlsverify version + '''.strip()) diff --git a/tests/integration/rostest/test_10_network_from_url.py b/tests/integration/rostest/test_10_network_from_url.py index fa9dd66c..550ad4d1 100644 --- a/tests/integration/rostest/test_10_network_from_url.py +++ b/tests/integration/rostest/test_10_network_from_url.py @@ -28,9 +28,6 @@ set -x -e ip link show dev br0 ip link show dev br0.100 | grep br0.100@br0 ip link show dev eth1.100 | grep 'master br0' -ip link show dev eth6 | grep 'master bond0' -ip link show dev eth7 | grep 'master bond0' -[ "$( 0 { - for _, url := range urls { - ymlUrl := fmt.Sprintf("%s/%s/%s.yml", url, location[0:1], location) - bytes, err = LoadResource(ymlUrl, network, []string{}) - if err == nil { - log.Debugf("Loaded %s from %s", location, ymlUrl) - return bytes, nil - } + } + + return nil, ErrNotFound +} + +func serviceUrl(url, name string) string { + return fmt.Sprintf("%s/%s/%s.yml", url, name[0:1], name) +} + +func LoadServiceResource(name string, useNetwork bool, cfg *config.CloudConfig) ([]byte, error) { + bytes, err := LoadResource(name, useNetwork) + if err == nil { + log.Debugf("Loaded %s from %s", name, name) + return bytes, nil + } + if err == ErrNoNetwork || !useNetwork { + return nil, ErrNoNetwork + } + + urls := cfg.Rancher.Repositories.ToArray() + for _, url := range urls { + serviceUrl := serviceUrl(url, name) + bytes, err = LoadResource(serviceUrl, useNetwork) + if err == nil { + log.Debugf("Loaded %s from %s", name, serviceUrl) + return bytes, nil } } diff --git a/util/network/network_test.go b/util/network/network_test.go index a441d17d..4dddbd03 100644 --- a/util/network/network_test.go +++ b/util/network/network_test.go @@ -16,7 +16,7 @@ func NoTestLoadResourceSimple(t *testing.T) { ` expected = strings.TrimSpace(expected) - b, e := LoadResource("https://raw.githubusercontent.com/rancher/os-services/v0.3.4/index.yml", true, []string{}) + b, e := LoadResource("https://raw.githubusercontent.com/rancher/os-services/v0.3.4/index.yml", true) assert.Nil(e) assert.Equal(expected, strings.TrimSpace(string(b))) diff --git a/util/util.go b/util/util.go index ee1d0ffb..b75b7898 100644 --- a/util/util.go +++ b/util/util.go @@ -1,17 +1,22 @@ package util import ( + "bufio" "bytes" - "io" + 
"errors" + "fmt" "io/ioutil" "os" + "path" "strings" yaml "github.com/cloudfoundry-incubator/candiedyaml" log "github.com/Sirupsen/logrus" +) - "reflect" +const ( + DOCKER_CGROUPS_FILE = "/proc/self/cgroup" ) type AnyMap map[interface{}]interface{} @@ -32,23 +37,33 @@ func Contains(values []string, value string) bool { type ReturnsErr func() error -func FileCopy(src, dest string) (err error) { - in, err := os.Open(src) +func FileCopy(src, dest string) error { + data, err := ioutil.ReadFile(src) if err != nil { return err } - defer func() { err = in.Close() }() + return WriteFileAtomic(dest, data, 0666) +} - out, err := os.Create(dest) +func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { + dir, file := path.Split(filename) + tempFile, err := ioutil.TempFile(dir, fmt.Sprintf(".%s", file)) if err != nil { return err } - defer func() { err = out.Close() }() + defer os.Remove(tempFile.Name()) - if _, err := io.Copy(out, in); err != nil { + if _, err := tempFile.Write(data); err != nil { return err } - return + if err := tempFile.Close(); err != nil { + return err + } + if err := os.Chmod(tempFile.Name(), perm); err != nil { + return err + } + + return os.Rename(tempFile.Name(), filename) } func Convert(from, to interface{}) error { @@ -91,56 +106,7 @@ func Copy(d interface{}) interface{} { } } -func Replace(l, r interface{}) interface{} { - return r -} - -func Equal(l, r interface{}) interface{} { - if reflect.DeepEqual(l, r) { - return l - } - return nil -} - -func Filter(xs []interface{}, p func(x interface{}) bool) []interface{} { - return FlatMap(xs, func(x interface{}) []interface{} { - if p(x) { - return []interface{}{x} - } - return []interface{}{} - }) -} - -func FilterStrings(xs []string, p func(x string) bool) []string { - return FlatMapStrings(xs, func(x string) []string { - if p(x) { - return []string{x} - } - return []string{} - }) -} - -func Map(xs []interface{}, f func(x interface{}) interface{}) []interface{} { - return FlatMap(xs, func(x interface{}) []interface{} { return []interface{}{f(x)} }) -} - -func FlatMap(xs []interface{}, f func(x interface{}) []interface{}) []interface{} { - result := []interface{}{} - for _, x := range xs { - result = append(result, f(x)...) - } - return result -} - -func FlatMapStrings(xs []string, f func(x string) []string) []string { - result := []string{} - for _, x := range xs { - result = append(result, f(x)...) 
- } - return result -} - -func MapsUnion(left, right map[interface{}]interface{}) map[interface{}]interface{} { +func Merge(left, right map[interface{}]interface{}) map[interface{}]interface{} { result := MapCopy(left) for k, r := range right { @@ -149,12 +115,12 @@ func MapsUnion(left, right map[interface{}]interface{}) map[interface{}]interfac case map[interface{}]interface{}: switch r := r.(type) { case map[interface{}]interface{}: - result[k] = MapsUnion(l, r) + result[k] = Merge(l, r) default: - result[k] = Replace(l, r) + result[k] = r } default: - result[k] = Replace(l, r) + result[k] = r } } else { result[k] = Copy(r) @@ -164,66 +130,6 @@ func MapsUnion(left, right map[interface{}]interface{}) map[interface{}]interfac return result } -func MapsDifference(left, right map[interface{}]interface{}) map[interface{}]interface{} { - result := map[interface{}]interface{}{} - - for k, l := range left { - if r, ok := right[k]; ok { - switch l := l.(type) { - case map[interface{}]interface{}: - switch r := r.(type) { - case map[interface{}]interface{}: - if len(l) == 0 && len(r) == 0 { - continue - } else if len(l) == 0 { - result[k] = l - } else if v := MapsDifference(l, r); len(v) > 0 { - result[k] = v - } - default: - if v := Equal(l, r); v == nil { - result[k] = l - } - } - default: - if v := Equal(l, r); v == nil { - result[k] = l - } - } - } else { - result[k] = l - } - } - - return result -} - -func MapsIntersection(left, right map[interface{}]interface{}) map[interface{}]interface{} { - result := map[interface{}]interface{}{} - - for k, l := range left { - if r, ok := right[k]; ok { - switch l := l.(type) { - case map[interface{}]interface{}: - switch r := r.(type) { - case map[interface{}]interface{}: - result[k] = MapsIntersection(l, r) - default: - if v := Equal(l, r); v != nil { - result[k] = v - } - } - default: - if v := Equal(l, r); v != nil { - result[k] = v - } - } - } - } - - return result -} - func MapCopy(data map[interface{}]interface{}) map[interface{}]interface{} { result := map[interface{}]interface{}{} for k, v := range data { @@ -240,6 +146,16 @@ func SliceCopy(data []interface{}) []interface{} { return result } +func RemoveString(slice []string, s string) []string { + result := []string{} + for _, elem := range slice { + if elem != s { + result = append(result, elem) + } + } + return result +} + func ToStrings(data []interface{}) []string { result := make([]string, len(data), len(data)) for k, v := range data { @@ -248,18 +164,6 @@ func ToStrings(data []interface{}) []string { return result } -func DirLs(dir string) ([]interface{}, error) { - result := []interface{}{} - files, err := ioutil.ReadDir(dir) - if err != nil { - return result, err - } - for _, f := range files { - result = append(result, f) - } - return result, nil -} - func Map2KVPairs(m map[string]string) []string { r := make([]string, 0, len(m)) for k, v := range m { @@ -289,3 +193,35 @@ func TrimSplitN(str, sep string, count int) []string { func TrimSplit(str, sep string) []string { return TrimSplitN(str, sep, -1) } + +func GetCurrentContainerId() (string, error) { + file, err := os.Open(DOCKER_CGROUPS_FILE) + + if err != nil { + return "", err + } + + fileReader := bufio.NewScanner(file) + if !fileReader.Scan() { + return "", errors.New("Empty file /proc/self/cgroup") + } + line := fileReader.Text() + parts := strings.Split(line, "/") + + for len(parts) != 3 { + if !fileReader.Scan() { + return "", errors.New("Found no docker cgroups") + } + line = fileReader.Text() + parts = strings.Split(line, "/") 
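+		// a docker cgroup entry looks like "4:cpu:/docker/<container-id>": splitting on "/" yields exactly three parts, with parts[1] ending in "docker" for docker-managed processes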
+ if len(parts) == 3 { + if strings.HasSuffix(parts[1], "docker") { + break + } else { + parts = nil + } + } + } + + return parts[len(parts)-1:][0], nil +} diff --git a/util/util_test.go b/util/util_test.go index 37327081..6f71c05e 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -12,15 +12,6 @@ type testCloudConfig struct { Key2 string `yaml:"key2,omitempty"` } -func TestPassByValue(t *testing.T) { - assert := require.New(t) - cc0ptr := &testCloudConfig{} - cc0ptr.Hostname = "test0" - cc1 := *cc0ptr - cc1.Hostname = "test1" - assert.NotEqual(cc0ptr.Hostname, cc1.Hostname) -} - func TestConvertMergesLeftIntoRight(t *testing.T) { assert := require.New(t) cc0 := testCloudConfig{Key1: "k1v0", Key2: "k2v0"} @@ -30,13 +21,6 @@ func TestConvertMergesLeftIntoRight(t *testing.T) { assert.Equal(expected, cc0) } -func TestNilMap(t *testing.T) { - assert := require.New(t) - var m map[string]interface{} = nil - assert.True(m == nil) - assert.True(len(m) == 0) -} - func NoTestCopyPointer(t *testing.T) { assert := require.New(t) testCCpt := &testCloudConfig{} @@ -48,35 +32,6 @@ func NoTestCopyPointer(t *testing.T) { assert.Equal("", m1["b"].(*testCloudConfig).Hostname) } -func TestEmptyMap(t *testing.T) { - assert := require.New(t) - m := map[interface{}]interface{}{} - assert.True(len(m) == 0) -} - -func tryMutateArg(p *string) *string { - s := "test" - p = &s - return p -} - -func TestMutableArg(t *testing.T) { - assert := require.New(t) - s := "somestring" - p := &s - assert.NotEqual(tryMutateArg(p), p) -} - -func TestFilter(t *testing.T) { - assert := require.New(t) - ss := []interface{}{"1", "2", "3", "4"} - assert.Equal([]interface{}{"1", "2", "4"}, Filter(ss, func(x interface{}) bool { return x != "3" })) - - ss1 := append([]interface{}{}, "qqq") - assert.Equal([]interface{}{"qqq"}, ss1) - -} - func TestMapCopy(t *testing.T) { assert := require.New(t) m0 := map[interface{}]interface{}{"a": 1, "b": map[interface{}]interface{}{"c": 3}, "d": "4"} @@ -109,52 +64,7 @@ func TestSliceCopy(t *testing.T) { assert.Equal(len(b1), len(b0)+1) } -func TestMapsIntersection(t *testing.T) { - assert := require.New(t) - - m0 := map[interface{}]interface{}{ - "a": 1, - "b": map[interface{}]interface{}{"c": 3}, - "d": "4", - "e": []interface{}{1, 2, 3}, - } - m1 := MapCopy(m0) - - delete(m0, "a") - b1 := m1["b"].(map[interface{}]interface{}) - delete(b1, "c") - m1["e"] = []interface{}{2, 3, 4} - expected := map[interface{}]interface{}{"b": map[interface{}]interface{}{}, "d": "4"} - assert.Equal(expected, MapsIntersection(m0, m1)) -} - -func TestMapsDifference(t *testing.T) { - assert := require.New(t) - - m0 := map[interface{}]interface{}{ - "a": 1, - "b": map[interface{}]interface{}{"c": 3}, - "d": "4", - "e": []interface{}{1, 2, 3}, - } - m1 := MapCopy(m0) - - assert.Equal(map[interface{}]interface{}{}, MapsDifference(m0, m0)) - assert.Equal(map[interface{}]interface{}{}, MapsDifference(m0, m1)) - - delete(m1, "a") - b1 := m1["b"].(map[interface{}]interface{}) - delete(b1, "c") - m1["e"] = []interface{}{2, 3, 4} - - expectedM1M0 := map[interface{}]interface{}{"b": map[interface{}]interface{}{}, "e": []interface{}{2, 3, 4}} - assert.Equal(expectedM1M0, MapsDifference(m1, m0)) - - expectedM0M1 := map[interface{}]interface{}{"a": 1, "b": map[interface{}]interface{}{"c": 3}, "e": []interface{}{1, 2, 3}} - assert.Equal(expectedM0M1, MapsDifference(m0, m1)) -} - -func TestMapsUnion(t *testing.T) { +func TestMerge(t *testing.T) { assert := require.New(t) m0 := map[interface{}]interface{}{ @@ -178,5 +88,5 @@ func 
TestMapsUnion(t *testing.T) { "e": "added", "f": []interface{}{2, 3, 4}, } - assert.Equal(expected, MapsUnion(m0, m1)) + assert.Equal(expected, Merge(m0, m1)) } diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 00000000..b8b569d7 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 00000000..478862a8 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,15 @@ +# go-winio + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 00000000..bfefd42c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,241 @@ +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData + + StreamSparseAttributes = uint32(8) +) + +// BackupHeader represents a backup stream of a file. 
+type BackupHeader struct {
+ Id uint32 // The backup stream ID
+ Attributes uint32 // Stream attributes
+ Size int64 // The size of the stream in bytes
+ Name string // The name of the stream (for BackupAlternateData only).
+ Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
+}
+
+type win32StreamId struct {
+ StreamId uint32
+ Attributes uint32
+ Size uint64
+ NameSize uint32
+}
+
+// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
+// of BackupHeader values.
+type BackupStreamReader struct {
+ r io.Reader
+ bytesLeft int64
+}
+
+// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
+ return &BackupStreamReader{r, 0}
+}
+
+// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
+// it was not completely read.
+func (r *BackupStreamReader) Next() (*BackupHeader, error) {
+ if r.bytesLeft > 0 {
+ if _, err := io.Copy(ioutil.Discard, r); err != nil {
+ return nil, err
+ }
+ }
+ var wsi win32StreamId
+ if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
+ return nil, err
+ }
+ hdr := &BackupHeader{
+ Id: wsi.StreamId,
+ Attributes: wsi.Attributes,
+ Size: int64(wsi.Size),
+ }
+ if wsi.NameSize != 0 {
+ name := make([]uint16, int(wsi.NameSize/2))
+ if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
+ return nil, err
+ }
+ hdr.Name = syscall.UTF16ToString(name)
+ }
+ if wsi.StreamId == BackupSparseBlock {
+ if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
+ return nil, err
+ }
+ hdr.Size -= 8
+ }
+ r.bytesLeft = hdr.Size
+ return hdr, nil
+}
+
+// Read reads from the current backup stream.
+func (r *BackupStreamReader) Read(b []byte) (int, error) {
+ if r.bytesLeft == 0 {
+ return 0, io.EOF
+ }
+ if int64(len(b)) > r.bytesLeft {
+ b = b[:r.bytesLeft]
+ }
+ n, err := r.r.Read(b)
+ r.bytesLeft -= int64(n)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else if r.bytesLeft == 0 && err == nil {
+ err = io.EOF
+ }
+ return n, err
+}
+
+// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
+type BackupStreamWriter struct {
+ w io.Writer
+ bytesLeft int64
+}
+
+// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
+func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
+ return &BackupStreamWriter{w, 0}
+}
+
+// WriteHeader writes the next backup stream header and prepares for calls to Write().
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
+ if w.bytesLeft != 0 {
+ return fmt.Errorf("missing %d bytes", w.bytesLeft)
+ }
+ name := utf16.Encode([]rune(hdr.Name))
+ wsi := win32StreamId{
+ StreamId: hdr.Id,
+ Attributes: hdr.Attributes,
+ Size: uint64(hdr.Size),
+ NameSize: uint32(len(name) * 2),
+ }
+ if hdr.Id == BackupSparseBlock {
+ // Include space for the int64 block offset
+ wsi.Size += 8
+ }
+ if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
+ return err
+ }
+ if len(name) != 0 {
+ if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
+ return err
+ }
+ }
+ if hdr.Id == BackupSparseBlock {
+ if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
+ return err
+ }
+ }
+ w.bytesLeft = hdr.Size
+ return nil
+}
+
+// Write writes to the current backup stream.
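+//
+// Editor's note: as an illustration only (this sketch is not part of the
+// upstream file), a BackupStreamReader and a BackupStreamWriter are typically
+// chained to copy every stream of one file to another; src and dst are
+// assumed to be *os.File handles opened with backup/restore privileges:
+//
+//	br := NewBackupStreamReader(NewBackupFileReader(src, true))
+//	bw := NewBackupStreamWriter(NewBackupFileWriter(dst, true))
+//	for {
+//		hdr, err := br.Next()
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		if err := bw.WriteHeader(hdr); err != nil {
+//			return err
+//		}
+//		if _, err := io.Copy(bw, br); err != nil {
+//			return err
+//		}
+//	}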
+func (w *BackupStreamWriter) Write(b []byte) (int, error) {
+ if w.bytesLeft < int64(len(b)) {
+ return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
+ }
+ n, err := w.w.Write(b)
+ w.bytesLeft -= int64(n)
+ return n, err
+}
+
+// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
+type BackupFileReader struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
+// Read will attempt to read the security descriptor of the file.
+func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
+ r := &BackupFileReader{f, includeSecurity, 0}
+ runtime.SetFinalizer(r, func(r *BackupFileReader) { r.Close() })
+ return r
+}
+
+// Read reads a backup stream from the file by calling the Win32 API BackupRead().
+func (r *BackupFileReader) Read(b []byte) (int, error) {
+ var bytesRead uint32
+ err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+ if err != nil {
+ return 0, &os.PathError{"BackupRead", r.f.Name(), err}
+ }
+ if bytesRead == 0 {
+ return 0, io.EOF
+ }
+ return int(bytesRead), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileReader. It does not close
+// the underlying file.
+func (r *BackupFileReader) Close() error {
+ if r.ctx != 0 {
+ backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+ r.ctx = 0
+ }
+ return nil
+}
+
+// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
+type BackupFileWriter struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
+// Write() will attempt to restore the security descriptor from the stream.
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+ w := &BackupFileWriter{f, includeSecurity, 0}
+ runtime.SetFinalizer(w, func(w *BackupFileWriter) { w.Close() })
+ return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+ var bytesWritten uint32
+ err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+ if err != nil {
+ return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
+ }
+ if int(bytesWritten) != len(b) {
+ return int(bytesWritten), errors.New("not all bytes could be written")
+ }
+ return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + w.ctx = 0 + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 00000000..fd16f007 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,219 @@ +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + closing bool + readDeadline time.Time + writeDeadline time.Time +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + runtime.SetFinalizer(f, (*win32File).closeHandle) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return makeWin32File(h) +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + if !f.closing { + // cancel all IO and wait for it to complete + f.closing = true + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } +} + +// Close closes a win32File. 
+func (f *win32File) Close() error { + f.closeHandle() + runtime.SetFinalizer(f, nil) + return nil +} + +// prepareIo prepares for a new IO operation +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wg.Add(1) + if f.closing { + return nil, ErrFileClosed + } + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + // Set the timer resolution to 1. This fixes a performance regression in golang 1.6. + timeBeginPeriod(1) + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, deadline time.Time, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + f.wg.Done() + return int(bytes), err + } else { + var r ioResult + wait := true + timedout := false + if f.closing { + cancelIoEx(f.handle, &c.o) + } else if !deadline.IsZero() { + now := time.Now() + if !deadline.After(now) { + timedout = true + } else { + timeout := time.After(deadline.Sub(now)) + select { + case r = <-c.ch: + wait = false + case <-timeout: + timedout = true + } + } + } + if timedout { + cancelIoEx(f.handle, &c.o) + } + if wait { + r = <-c.ch + } + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing { + err = ErrFileClosed + } else if timedout { + err = ErrTimeout + } + } + f.wg.Done() + return int(r.bytes), err + } +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, f.readDeadline, bytes, err) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. 
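+//
+// Editor's sketch (not upstream text): reads and writes honor the deadlines
+// set through SetReadDeadline/SetWriteDeadline below, and an expired deadline
+// surfaces as ErrTimeout. f is assumed to be a *win32File:
+//
+//	f.SetReadDeadline(time.Now().Add(5 * time.Second))
+//	if _, err := f.Read(buf); err == ErrTimeout {
+//		// the read did not complete within 5 seconds
+//	}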
+func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + return f.asyncIo(c, f.writeDeadline, bytes, err) +} + +func (f *win32File) SetReadDeadline(t time.Time) error { + f.readDeadline = t + return nil +} + +func (f *win32File) SetWriteDeadline(t time.Time) error { + f.writeDeadline = t + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 00000000..dc05a8b3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,30 @@ +package winio + +import ( + "os" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uintptr // includes padding +} + +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{"GetFileInformationByHandleEx", f.Name(), err} + } + return bi, nil +} + +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{"SetFileInformationByHandle", f.Name(), err} + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 00000000..82db2830 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,398 @@ +package winio + +import ( + "errors" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW + +type securityAttributes struct { + Length uint32 + SecurityDescriptor *byte + InheritHandle uint32 +} + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cPIPE_ACCESS_DUPLEX = 0x3 + cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + 
cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + + cPIPE_UNLIMITED_INSTANCES = 255 + + cNMPWAIT_USE_DEFAULT_WAIT = 0 + cNMPWAIT_NOWAIT = 1 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + _, err := f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then the timeout +// is the default timeout established by the pipe server. 
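+//
+// Editor's sketch (not part of the upstream comment; the pipe name is a
+// placeholder): a client with a 2-second connect budget might call
+//
+//	timeout := 2 * time.Second
+//	conn, err := DialPipe(`\\.\pipe\mypipe`, &timeout)
+//	if err != nil {
+//		return err
+//	}
+//	defer conn.Close()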
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } + var err error + var h syscall.Handle + for { + h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != cERROR_PIPE_BUSY { + break + } + now := time.Now() + var ms uint32 + if absTimeout.IsZero() { + ms = cNMPWAIT_USE_DEFAULT_WAIT + } else if now.After(absTimeout) { + ms = cNMPWAIT_NOWAIT + } else { + ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) + } + err = waitNamedPipe(path, ms) + if err != nil { + if err == cERROR_SEM_TIMEOUT { + return nil, ErrTimeout + } + break + } + } + if err != nil { + return nil, &os.PathError{Op: "open", Path: path, Err: err} + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + var state uint32 + err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) + if err != nil { + return nil, err + } + + if state&cPIPE_READMODE_MESSAGE != 0 { + return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + securityDescriptor []byte + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED + if first { + flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + } + + var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + if c.MessageMode { + mode |= cPIPE_TYPE_MESSAGE + } + + var sa securityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + if securityDescriptor != nil { + sa.SecurityDescriptor = &securityDescriptor[0] + } + h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, &sa) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + p, err := l.makeServerPipe() + if err == nil { + // Wait for the client to connect. + ch := make(chan error) + go func() { + ch <- connectPipe(p) + }() + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. 
+ p.Close()
+ p = nil
+ err = <-ch
+ if err == nil || err == ErrFileClosed {
+ err = ErrPipeListenerClosed
+ }
+ closed = true
+ }
+ }
+ responseCh <- acceptResponse{p, err}
+ }
+ }
+ syscall.Close(l.firstHandle)
+ l.firstHandle = 0
+ // Notify Close() and Accept() callers that the handle has been closed.
+ close(l.doneCh)
+}
+
+// PipeConfig contains configuration for the pipe listener.
+type PipeConfig struct {
+ // SecurityDescriptor contains a Windows security descriptor in SDDL format.
+ SecurityDescriptor string
+
+ // MessageMode determines whether the pipe is in byte or message mode. In either
+ // case the pipe is read in byte mode by default. The only practical difference in
+ // this implementation is that CloseWrite() is only supported for message mode pipes;
+ // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+ // transferred to the reader (and returned as io.EOF in this implementation)
+ // when the pipe is in message mode.
+ MessageMode bool
+
+ // InputBufferSize specifies the size of the input buffer, in bytes.
+ InputBufferSize int32
+
+ // OutputBufferSize specifies the size of the output buffer, in bytes.
+ OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
+ var (
+ sd []byte
+ err error
+ )
+ if c == nil {
+ c = &PipeConfig{}
+ }
+ if c.SecurityDescriptor != "" {
+ sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
+ if err != nil {
+ return nil, err
+ }
+ }
+ h, err := makeServerPipeHandle(path, sd, c, true)
+ if err != nil {
+ return nil, err
+ }
+ // Immediately open and then close a client handle so that the named pipe is
+ // created but not currently accepting connections.
+ h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != nil { + syscall.Close(h) + return nil, err + } + syscall.Close(h2) + l := &win32PipeListener{ + firstHandle: h, + path: path, + securityDescriptor: sd, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, time.Time{}, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 00000000..81f9af7b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,150 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +func RunWithPrivileges(names []string, fn func() error) error { + var 
privileges []uint64
+ for _, name := range names {
+ p := uint64(0)
+ err := lookupPrivilegeValue("", name, &p)
+ if err != nil {
+ return err
+ }
+ privileges = append(privileges, p)
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ token, err := newThreadToken()
+ if err != nil {
+ return err
+ }
+ defer releaseThreadToken(token)
+ err = adjustPrivileges(token, privileges)
+ if err != nil {
+ return err
+ }
+ return fn()
+}
+
+func adjustPrivileges(token syscall.Handle, privileges []uint64) error {
+ var b bytes.Buffer
+ binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+ for _, p := range privileges {
+ binary.Write(&b, binary.LittleEndian, p)
+ binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED))
+ }
+ prevState := make([]byte, b.Len())
+ reqSize := uint32(0)
+ success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+ if !success {
+ return err
+ }
+ if err == ERROR_NOT_ALL_ASSIGNED {
+ return &PrivilegeError{privileges}
+ }
+ return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+ var nameBuffer [256]uint16
+ bufSize := uint32(len(nameBuffer))
+ err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %d>", luid)
+ }
+
+ var displayNameBuffer [256]uint16
+ displayBufSize := uint32(len(displayNameBuffer))
+ var langId uint32
+ err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langId)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+ }
+
+ return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (syscall.Handle, error) {
+ err := impersonateSelf(securityImpersonation)
+ if err != nil {
+ panic(err)
+ return 0, err
+ }
+
+ var token syscall.Handle
+ err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+ if err != nil {
+ rerr := revertToSelf()
+ if rerr != nil {
+ panic(rerr)
+ }
+ return 0, err
+ }
+ return token, nil
+}
+
+func releaseThreadToken(h syscall.Handle) {
+ err := revertToSelf()
+ if err != nil {
+ panic(err)
+ }
+ syscall.Close(h)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 00000000..96d7b9a8
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,124 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "strings"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ reparseTagMountPoint = 0xA0000003
+ reparseTagSymlink = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+ ReparseTag uint32
+ ReparseDataLength uint16
+ Reserved uint16
+ SubstituteNameOffset uint16
+ SubstituteNameLength uint16
+ PrintNameOffset uint16
+ PrintNameLength uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+ Target string
+ IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct {
+ Tag uint32
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+ return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
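+//
+// Editor's sketch (not part of the upstream comment): for supported tags,
+// DecodeReparsePoint inverts EncodeReparsePoint, so a round trip preserves
+// the target path:
+//
+//	rp := &ReparsePoint{Target: `C:\target`, IsMountPoint: false}
+//	rp2, err := DecodeReparsePoint(EncodeReparsePoint(rp))
+//	// on success, rp2.Target == `C:\target`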
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + isMountPoint := false + tag := binary.LittleEndian.Uint32(b[0:4]) + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 16 + binary.LittleEndian.Uint16(b[12:14]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[14:16]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = rp.Target + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. + target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 00000000..60ab56ce --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,96 @@ +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const 
(
+ cERROR_NONE_MAPPED = syscall.Errno(1332)
+)
+
+type AccountLookupError struct {
+ Name string
+ Err error
+}
+
+func (e *AccountLookupError) Error() string {
+ if e.Name == "" {
+ return "lookup account: empty account name specified"
+ }
+ var s string
+ switch e.Err {
+ case cERROR_NONE_MAPPED:
+ s = "not found"
+ default:
+ s = e.Err.Error()
+ }
+ return "lookup account " + e.Name + ": " + s
+}
+
+type SddlConversionError struct {
+ Sddl string
+ Err error
+}
+
+func (e *SddlConversionError) Error() string {
+ return "convert " + e.Sddl + ": " + e.Err.Error()
+}
+
+// LookupSidByName looks up the SID of an account by name
+func LookupSidByName(name string) (sid string, err error) {
+ if name == "" {
+ return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
+ }
+
+ var sidSize, sidNameUse, refDomainSize uint32
+ err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
+ if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
+ return "", &AccountLookupError{name, err}
+ }
+ sidBuffer := make([]byte, sidSize)
+ refDomainBuffer := make([]uint16, refDomainSize)
+ err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ var strBuffer *uint16
+ err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+ localFree(uintptr(unsafe.Pointer(strBuffer)))
+ return sid, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+ var sdBuffer uintptr
+ err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+ if err != nil {
+ return nil, &SddlConversionError{sddl, err}
+ }
+ defer localFree(sdBuffer)
+ sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
+ copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
+ return sd, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+ var sddl *uint16
+ // The returned string length seems to include an arbitrary number of terminating NULs.
+ // Don't use it.
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 00000000..96fdff7b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall.go b/vendor/github.com/Microsoft/go-winio/zsyscall.go new file mode 100644 index 00000000..74b6e97a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall.go @@ -0,0 +1,492 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package winio + +import "unsafe" +import "syscall" + +var _ unsafe.Pointer + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + modwinmm = syscall.NewLazyDLL("winmm.dll") + modadvapi32 = syscall.NewLazyDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if 
r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func timeBeginPeriod(period uint32) (n int32) { + r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + n = int32(r0) + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if 
e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name string, timeout uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _waitNamedPipe(_p0, timeout) +} + +func _waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + 
return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/RackSec/srslog/.gitignore b/vendor/github.com/RackSec/srslog/.gitignore new file mode 100644 index 00000000..ebf0f2e4 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/.gitignore @@ -0,0 +1 @@ +.cover diff --git a/vendor/github.com/RackSec/srslog/.travis.yml b/vendor/github.com/RackSec/srslog/.travis.yml new file mode 
100644 index 00000000..4e5c4f07 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/.travis.yml @@ -0,0 +1,18 @@ +sudo: required +dist: trusty +group: edge +language: go +go: +- 1.5 +before_install: + - pip install --user codecov +script: +- | + go get ./... + go test -v -coverprofile=coverage.txt -covermode=atomic + go vet +after_success: + - codecov +notifications: + slack: + secure: dtDue9gP6CRR1jYjEf6raXXFak3QKGcCFvCf5mfvv5XScdpmc3udwgqc5TdyjC0goaC9OK/4jTcCD30dYZm/u6ux3E9mo3xwMl2xRLHx76p5r9rSQtloH19BDwA2+A+bpDfFQVz05k2YXuTiGSvNMMdwzx+Dr294Sl/z43RFB4+b9/R/6LlFpRW89IwftvpLAFnBy4K/ZcspQzKM+rQfQTL5Kk+iZ/KBsuR/VziDq6MoJ8t43i4ee8vwS06vFBKDbUiZ4FIZpLgc2RAL5qso5aWRKYXL6waXfoKHZWKPe0w4+9IY1rDJxG1jEb7YGgcbLaF9xzPRRs2b2yO/c87FKpkh6PDgYHfLjpgXotCoojZrL4p1x6MI1ldJr3NhARGPxS9r4liB9n6Y5nD+ErXi1IMf55fuUHcPY27Jc0ySeLFeM6cIWJ8OhFejCgGw6a5DnnmJo0PqopsaBDHhadpLejT1+K6bL2iGkT4SLcVNuRGLs+VyuNf1+5XpkWZvy32vquO7SZOngLLBv+GIem+t3fWm0Z9s/0i1uRCQei1iUutlYjoV/LBd35H2rhob4B5phIuJin9kb0zbHf6HnaoN0CtN8r0d8G5CZiInVlG5Xcid5Byb4dddf5U2EJTDuCMVyyiM7tcnfjqw9UbVYNxtYM9SzcqIq+uVqM8pYL9xSec= diff --git a/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md b/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..18ac49fc --- /dev/null +++ b/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md @@ -0,0 +1,50 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer at [sirsean@gmail.com]. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. Maintainers are +obligated to maintain confidentiality with regard to the reporter of an +incident. 
+ + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.3.0, available at +[http://contributor-covenant.org/version/1/3/0/][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/vendor/github.com/RackSec/srslog/LICENSE b/vendor/github.com/RackSec/srslog/LICENSE new file mode 100644 index 00000000..9269338f --- /dev/null +++ b/vendor/github.com/RackSec/srslog/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015 Rackspace. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/RackSec/srslog/README.md b/vendor/github.com/RackSec/srslog/README.md new file mode 100644 index 00000000..1ae1fd4e --- /dev/null +++ b/vendor/github.com/RackSec/srslog/README.md @@ -0,0 +1,131 @@ +[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) + +# srslog + +Go has a `syslog` package in the standard library, but it has the following +shortcomings: + +1. It doesn't have TLS support +2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) + +I agree that it doesn't need to be in the standard library. So, I've +followed Brad's suggestion and have made a separate project to handle syslog. + +This code was taken directly from the Go project as a base to start from. + +However, this _does_ have TLS support. + +# Usage + +Basic usage retains the same interface as the original `syslog` package. We +only added to the interface where required to support new functionality. 
+ +Switch from the standard library: + +``` +import ( + //"log/syslog" + syslog "github.com/RackSec/srslog" +) +``` + +You can still use it for local syslog: + +``` +w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted UDP: + +``` +w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted TCP: + +``` +w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag") +``` + +But now you can also send messages via TLS-encrypted TCP: + +``` +w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem") +``` + +And if you need more control over your TLS configuration: + +``` +pool := x509.NewCertPool() +serverCert, err := ioutil.ReadFile("/path/to/servercert.pem") +if err != nil { + return nil, err +} +pool.AppendCertsFromPEM(serverCert) +config := tls.Config{ + RootCAs: pool, +} + +w, err := DialWithTLSConfig(network, raddr, priority, tag, &config) +``` + +(Note that in both TLS cases, this uses a self-signed certificate, where the +remote syslog server has the keypair and the client has only the public key.) + +And then to write log messages, continue like so: + +``` +if err != nil { + log.Fatal("failed to connect to syslog:", err) +} +defer w.Close() + +w.Alert("this is an alert") +w.Crit("this is critical") +w.Err("this is an error") +w.Warning("this is a warning") +w.Notice("this is a notice") +w.Info("this is info") +w.Debug("this is debug") +w.Write([]byte("these are some bytes")) +``` + +# Generating TLS Certificates + +We've provided a script that you can use to generate a self-signed keypair: + +``` +pip install cryptography +python script/gen-certs.py +``` + +That outputs the public key and private key to standard out. Put those into +`.pem` files. (And don't put them into any source control. The certificate in +the `test` directory is used by the unit tests, and please do not actually use +it anywhere else.) + +# Running Tests + +Run the tests as usual: + +``` +go test +``` + +But we've also provided a test coverage script that will show you which +lines of code are not covered: + +``` +script/coverage --html +``` + +That will open a new browser tab showing coverage information. + +# License + +This project uses the New BSD License, the same as the Go project itself. + +# Code of Conduct + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. diff --git a/vendor/github.com/RackSec/srslog/constants.go b/vendor/github.com/RackSec/srslog/constants.go new file mode 100644 index 00000000..600801ee --- /dev/null +++ b/vendor/github.com/RackSec/srslog/constants.go @@ -0,0 +1,68 @@ +package srslog + +import ( + "errors" +) + +// Priority is a combination of the syslog facility and +// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity +// message from the FTP facility. The default severity is LOG_EMERG; +// the default facility is LOG_KERN. +type Priority int + +const severityMask = 0x07 +const facilityMask = 0xf8 + +const ( + // Severity. + + // From /usr/include/sys/syslog.h. + // These are the same on Linux, BSD, and OS X. + LOG_EMERG Priority = iota + LOG_ALERT + LOG_CRIT + LOG_ERR + LOG_WARNING + LOG_NOTICE + LOG_INFO + LOG_DEBUG +) + +const ( + // Facility. + + // From /usr/include/sys/syslog.h. + // These are the same up to LOG_FTP on Linux, BSD, and OS X.
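+ // Facility values sit in the bits above the 3-bit severity field (hence iota << 3), so a full Priority is one facility ORed with one severity (see severityMask and facilityMask above).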
+ LOG_KERN Priority = iota << 3 + LOG_USER + LOG_MAIL + LOG_DAEMON + LOG_AUTH + LOG_SYSLOG + LOG_LPR + LOG_NEWS + LOG_UUCP + LOG_CRON + LOG_AUTHPRIV + LOG_FTP + _ // unused + _ // unused + _ // unused + _ // unused + LOG_LOCAL0 + LOG_LOCAL1 + LOG_LOCAL2 + LOG_LOCAL3 + LOG_LOCAL4 + LOG_LOCAL5 + LOG_LOCAL6 + LOG_LOCAL7 +) + +func validatePriority(p Priority) error { + if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { + return errors.New("log/syslog: invalid priority") + } else { + return nil + } +} diff --git a/vendor/github.com/RackSec/srslog/dialer.go b/vendor/github.com/RackSec/srslog/dialer.go new file mode 100644 index 00000000..47a7b2be --- /dev/null +++ b/vendor/github.com/RackSec/srslog/dialer.go @@ -0,0 +1,87 @@ +package srslog + +import ( + "crypto/tls" + "net" +) + +// dialerFunctionWrapper is a simple object that consists of a dialer function +// and its name. This is primarily for testing, so we can make sure that the +// getDialer method returns the correct dialer function. However, if you ever +// find that you need to check which dialer function you have, this would also +// be useful for you without having to use reflection. +type dialerFunctionWrapper struct { + Name string + Dialer func() (serverConn, string, error) +} + +// Call the wrapped dialer function and return its return values. +func (df dialerFunctionWrapper) Call() (serverConn, string, error) { + return df.Dialer() +} + +// getDialer returns a "dialer" function that can be called to connect to a +// syslog server. +// +// Each dialer function is responsible for dialing the remote host and returns +// a serverConn, the hostname (or a default if the Writer has not specified a +// hostname), and an error in case dialing fails. +// +// The reason for separate dialers is that different network types may need +// to dial their connection differently, yet still provide a net.Conn interface +// that you can use once they have dialed. Rather than an increasingly long +// conditional, we have a map of network -> dialer function (with a sane default +// value), and adding a new network type is as easy as writing the dialer +// function and adding it to the map. +func (w *Writer) getDialer() dialerFunctionWrapper { + dialers := map[string]dialerFunctionWrapper{ + "": dialerFunctionWrapper{"unixDialer", w.unixDialer}, + "tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer}, + } + dialer, ok := dialers[w.network] + if !ok { + dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer} + } + return dialer +} + +// unixDialer uses the unixSyslog method to open a connection to the syslog +// daemon running on the local machine. +func (w *Writer) unixDialer() (serverConn, string, error) { + sc, err := unixSyslog() + hostname := w.hostname + if hostname == "" { + hostname = "localhost" + } + return sc, hostname, err +} + +// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network +// type. +func (w *Writer) tlsDialer() (serverConn, string, error) { + c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = &netConn{conn: c} + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} + +// basicDialer is the most common dialer for syslog, and supports both TCP and +// UDP connections. 
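+// The network and address are handed to net.Dial unchanged, so any network type it accepts (e.g. "tcp", "udp") works here.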
+func (w *Writer) basicDialer() (serverConn, string, error) { + c, err := net.Dial(w.network, w.raddr) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = &netConn{conn: c} + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} diff --git a/vendor/github.com/RackSec/srslog/formatter.go b/vendor/github.com/RackSec/srslog/formatter.go new file mode 100644 index 00000000..2a746251 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/formatter.go @@ -0,0 +1,48 @@ +package srslog + +import ( + "fmt" + "os" + "time" +) + +// Formatter is a type of function that takes the constituent parts of a +// syslog message and returns a formatted string. A different Formatter is +// defined for each different syslog protocol we support. +type Formatter func(p Priority, hostname, tag, content string) string + +// DefaultFormatter is the original format supported by the Go syslog package, +// and is a non-compliant amalgamation of 3164 and 5424 that is intended to +// maximize compatibility. +func DefaultFormatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s", + p, timestamp, hostname, tag, os.Getpid(), content) + return msg +} + +// UnixFormatter omits the hostname, because it is only used locally. +func UnixFormatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.Stamp) + msg := fmt.Sprintf("<%d>%s %s[%d]: %s", + p, timestamp, tag, os.Getpid(), content) + return msg +} + +// RFC3164Formatter provides an RFC 3164 compliant message. +func RFC3164Formatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.Stamp) + msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s", + p, timestamp, hostname, tag, os.Getpid(), content) + return msg +} + +// RFC5424Formatter provides an RFC 5424 compliant message. +func RFC5424Formatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + appName := os.Args[0] + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, appName, pid, tag, content) + return msg +} diff --git a/vendor/github.com/RackSec/srslog/framer.go b/vendor/github.com/RackSec/srslog/framer.go new file mode 100644 index 00000000..ab46f0de --- /dev/null +++ b/vendor/github.com/RackSec/srslog/framer.go @@ -0,0 +1,24 @@ +package srslog + +import ( + "fmt" +) + +// Framer is a type of function that takes an input string (typically an +// already-formatted syslog message) and applies "message framing" to it. We +// have different framers because different versions of the syslog protocol +// and its transport requirements define different framing behavior. +type Framer func(in string) string + +// DefaultFramer does nothing, since there is no framing to apply. This is +// the original behavior of the Go syslog package, and is also typically used +// for UDP syslog. +func DefaultFramer(in string) string { + return in +} + +// RFC5425MessageLengthFramer prepends the message length to the front of the +// provided message, as defined in RFC 5425.
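+// For example, the 5-byte message "hello" is framed as "5 hello".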
+func RFC5425MessageLengthFramer(in string) string { + return fmt.Sprintf("%d %s", len(in), in) +} diff --git a/vendor/github.com/RackSec/srslog/net_conn.go b/vendor/github.com/RackSec/srslog/net_conn.go new file mode 100644 index 00000000..75e4c3ca --- /dev/null +++ b/vendor/github.com/RackSec/srslog/net_conn.go @@ -0,0 +1,30 @@ +package srslog + +import ( + "net" +) + +// netConn has an internal net.Conn and adheres to the serverConn interface, +// allowing us to send syslog messages over the network. +type netConn struct { + conn net.Conn +} + +// writeString formats syslog messages using time.RFC3339 and includes the +// hostname, and sends the message to the connection. +func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = DefaultFormatter + } + formattedMessage := framer(formatter(p, hostname, tag, msg)) + _, err := n.conn.Write([]byte(formattedMessage)) + return err +} + +// close the network connection +func (n *netConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/RackSec/srslog/srslog.go b/vendor/github.com/RackSec/srslog/srslog.go new file mode 100644 index 00000000..4469d720 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/srslog.go @@ -0,0 +1,100 @@ +package srslog + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "log" + "os" +) + +// This interface allows us to work with both local and network connections, +// and enables Solaris support (see syslog_unix.go). +type serverConn interface { + writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error + close() error +} + +// New establishes a new connection to the system log daemon. Each +// write to the returned Writer sends a log message with the given +// priority and prefix. +func New(priority Priority, tag string) (w *Writer, err error) { + return Dial("", "", priority, tag) +} + +// Dial establishes a connection to a log daemon by connecting to +// address raddr on the specified network. Each write to the returned +// Writer sends a log message with the given facility, severity and +// tag. +// If network is empty, Dial will connect to the local syslog server. +func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) { + return DialWithTLSConfig(network, raddr, priority, tag, nil) +} + +// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses certPath to load TLS certificates and configure +// the secure connection. +func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) { + serverCert, err := ioutil.ReadFile(certPath) + if err != nil { + return nil, err + } + + return DialWithTLSCert(network, raddr, priority, tag, serverCert) +} + +// DialWithTLSCert establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses serverCert to load a TLS certificate +// and configure the secure connection.
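+// The serverCert bytes are expected to be PEM-encoded; they are appended to a fresh x509.CertPool that becomes the RootCAs of the TLS config.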
+func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(serverCert) + config := tls.Config{ + RootCAs: pool, + } + + return DialWithTLSConfig(network, raddr, priority, tag, &config) +} + +// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses tlsConfig to configure the secure connection. +func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) { + if err := validatePriority(priority); err != nil { + return nil, err + } + + if tag == "" { + tag = os.Args[0] + } + hostname, _ := os.Hostname() + + w := &Writer{ + priority: priority, + tag: tag, + hostname: hostname, + network: network, + raddr: raddr, + tlsConfig: tlsConfig, + } + + w.Lock() + defer w.Unlock() + + err := w.connect() + if err != nil { + return nil, err + } + return w, err +} + +// NewLogger creates a log.Logger whose output is written to +// the system log service with the specified priority. The logFlag +// argument is the flag set passed through to log.New to create +// the Logger. +func NewLogger(p Priority, logFlag int) (*log.Logger, error) { + s, err := New(p, "") + if err != nil { + return nil, err + } + return log.New(s, "", logFlag), nil +} diff --git a/vendor/github.com/RackSec/srslog/srslog_unix.go b/vendor/github.com/RackSec/srslog/srslog_unix.go new file mode 100644 index 00000000..a04d9396 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/srslog_unix.go @@ -0,0 +1,54 @@ +package srslog + +import ( + "errors" + "io" + "net" +) + +// unixSyslog opens a connection to the syslog daemon running on the +// local machine using a Unix domain socket. This function exists because of +// Solaris support as implemented by gccgo. On Solaris you can not +// simply open a TCP connection to the syslog daemon. The gccgo +// sources have a syslog_solaris.go file that implements unixSyslog to +// return a type that satisfies the serverConn interface and simply calls the C +// library syslog function. +func unixSyslog() (conn serverConn, err error) { + logTypes := []string{"unixgram", "unix"} + logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} + for _, network := range logTypes { + for _, path := range logPaths { + conn, err := net.Dial(network, path) + if err != nil { + continue + } else { + return &localConn{conn: conn}, nil + } + } + } + return nil, errors.New("Unix syslog delivery error") +} + +// localConn adheres to the serverConn interface, allowing us to send syslog +// messages to the local syslog daemon over a Unix domain socket. +type localConn struct { + conn io.WriteCloser +} + +// writeString formats syslog messages using time.Stamp instead of time.RFC3339, +// and omits the hostname (because it is expected to be used locally). 
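+// A nil framer or formatter falls back to DefaultFramer and UnixFormatter, respectively.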
+func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = UnixFormatter + } + _, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg)))) + return err +} + +// close the (local) network connection +func (n *localConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/RackSec/srslog/writer.go b/vendor/github.com/RackSec/srslog/writer.go new file mode 100644 index 00000000..fdecaf61 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/writer.go @@ -0,0 +1,164 @@ +package srslog + +import ( + "crypto/tls" + "strings" + "sync" +) + +// A Writer is a connection to a syslog server. +type Writer struct { + sync.Mutex // guards conn + + priority Priority + tag string + hostname string + network string + raddr string + tlsConfig *tls.Config + framer Framer + formatter Formatter + + conn serverConn +} + +// connect makes a connection to the syslog server. +// It must be called with w.mu held. +func (w *Writer) connect() (err error) { + if w.conn != nil { + // ignore err from close, it makes sense to continue anyway + w.conn.close() + w.conn = nil + } + + var conn serverConn + var hostname string + dialer := w.getDialer() + conn, hostname, err = dialer.Call() + if err == nil { + w.conn = conn + w.hostname = hostname + } + + return +} + +// SetFormatter changes the formatter function for subsequent messages. +func (w *Writer) SetFormatter(f Formatter) { + w.formatter = f +} + +// SetFramer changes the framer function for subsequent messages. +func (w *Writer) SetFramer(f Framer) { + w.framer = f +} + +// Write sends a log message to the syslog daemon using the default priority +// passed into `srslog.New` or the `srslog.Dial*` functions. +func (w *Writer) Write(b []byte) (int, error) { + return w.writeAndRetry(w.priority, string(b)) +} + +// Close closes a connection to the syslog daemon. +func (w *Writer) Close() error { + w.Lock() + defer w.Unlock() + + if w.conn != nil { + err := w.conn.close() + w.conn = nil + return err + } + return nil +} + +// Emerg logs a message with severity LOG_EMERG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Emerg(m string) (err error) { + _, err = w.writeAndRetry(LOG_EMERG, m) + return err +} + +// Alert logs a message with severity LOG_ALERT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Alert(m string) (err error) { + _, err = w.writeAndRetry(LOG_ALERT, m) + return err +} + +// Crit logs a message with severity LOG_CRIT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Crit(m string) (err error) { + _, err = w.writeAndRetry(LOG_CRIT, m) + return err +} + +// Err logs a message with severity LOG_ERR; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Err(m string) (err error) { + _, err = w.writeAndRetry(LOG_ERR, m) + return err +} + +// Warning logs a message with severity LOG_WARNING; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. 
+func (w *Writer) Warning(m string) (err error) { + _, err = w.writeAndRetry(LOG_WARNING, m) + return err +} + +// Notice logs a message with severity LOG_NOTICE; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Notice(m string) (err error) { + _, err = w.writeAndRetry(LOG_NOTICE, m) + return err +} + +// Info logs a message with severity LOG_INFO; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Info(m string) (err error) { + _, err = w.writeAndRetry(LOG_INFO, m) + return err +} + +// Debug logs a message with severity LOG_DEBUG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Debug(m string) (err error) { + _, err = w.writeAndRetry(LOG_DEBUG, m) + return err +} + +func (w *Writer) writeAndRetry(p Priority, s string) (int, error) { + pr := (w.priority & facilityMask) | (p & severityMask) + + w.Lock() + defer w.Unlock() + + if w.conn != nil { + if n, err := w.write(pr, s); err == nil { + return n, err + } + } + if err := w.connect(); err != nil { + return 0, err + } + return w.write(pr, s) +} + +// write generates and writes a syslog formatted string. It formats the +// message based on the current Formatter and Framer. +func (w *Writer) write(p Priority, msg string) (int, error) { + // ensure it ends in a \n + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + + err := w.conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg) + if err != nil { + return 0, err + } + // Note: return the length of the input, not the number of + // bytes printed by Fprintf, because this must behave like + // an io.Writer. + return len(msg), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/Sirupsen/logrus/entry_test.go deleted file mode 100644 index 99c3b41d..00000000 --- a/vendor/github.com/Sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEntryWithError(t *testing.T) { - - assert := assert.New(t) - - defer func() { - ErrorKey = "error" - }() - - err := fmt.Errorf("kaboom at layer %d", 4711) - - assert.Equal(err, WithError(err).Data["error"]) - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - - assert.Equal(err, entry.WithError(err).Data["error"]) - - ErrorKey = "err" - - assert.Equal(err, entry.WithError(err).Data["err"]) - -} - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} diff --git 
a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index c6d290c7..00000000 --- a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package logrus - -import ( - "fmt" - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -var errorFields = Fields{ - "foo": fmt.Errorf("bar"), - "baz": fmt.Errorf("qux"), -} - -func BenchmarkErrorTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 13f34cb6..00000000 --- a/vendor/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { 
- log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d708732..00000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index b7d9302d..00000000 --- a/vendor/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,316 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { 
- log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := 
logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} - -func TestLoggingRace(t *testing.T) { - logger := New() - - var wg sync.WaitGroup - wg.Add(100) - - for i := 0; i < 100; i++ { - go func() { - logger.Info("info") - wg.Done() - }() - } - wg.Wait() -} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index e25a44f6..00000000 --- a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "testing" - "time" -) - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") - checkQuoting(true, "x y") - checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) -} - -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, 
TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore new file mode 100644 index 00000000..c7bd2b7a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/.gitignore @@ -0,0 +1,4 @@ +*.prof +*.test +*.swp +/bin/ diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE new file mode 100644 index 00000000..004e77fe --- /dev/null +++ b/vendor/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile new file mode 100644 index 00000000..e035e63a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/Makefile @@ -0,0 +1,18 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt + +test: + @go test -v -cover . 
+ @go test -v ./cmd/bolt + +.PHONY: fmt test diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md new file mode 100644 index 00000000..66b19ace --- /dev/null +++ b/vendor/github.com/boltdb/bolt/README.md @@ -0,0 +1,844 @@ +Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable and the API is fixed. Full unit test coverage and randomized +black box testing are used to ensure database consistency and thread safety. +Bolt is currently in high-load production environments serving databases as +large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed +services every day. + +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. 
+ // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating a transaction from the `DB` is thread safe. + +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine. +This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open. + + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also roll back the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ...
+ id = newValue + return nil +}) +if err != nil { + return ... +} +fmt.Printf("Allocated ID %d\n", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err = tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different from the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened.
+ b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + + +### Nested buckets + +You can also store a bucket in a key to create nested buckets.
+The API is the same as the bucket management API on the `Tx` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+
+### Database backups
+
+Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to back up over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+    err := db.View(func(tx *bolt.Tx) error {
+        w.Header().Set("Content-Type", "application/octet-stream")
+        w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+        w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+        _, err := tx.WriteTo(w)
+        return err
+    })
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+    }
+}
+```
+
+Then you can back up using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to back up to another file you can use the `Tx.CopyFile()` helper
+function.
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+    // Grab the initial stats.
+    prev := db.Stats()
+
+    for {
+        // Wait for 10s.
+        time.Sleep(10 * time.Second)
+
+        // Grab the current stats and diff them.
+        stats := db.Stats()
+        diff := stats.Sub(&prev)
+
+        // Encode stats to JSON and print to STDERR.
+        json.NewEncoder(os.Stderr).Encode(diff)
+
+        // Save stats for the next loop.
+        prev = stats
+    }
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+    log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS require extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+    db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    return &BoltDB{db}
+}
+
+type BoltDB struct {
+    db *bolt.DB
+    ...
+}
+
+func (b *BoltDB) Path() string {
+    return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+    b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
+To initialize this struct from the native language, use the snippets below.
+Note that both platforms now sync their local storage to the cloud, so these
+snippets also exclude the database file from that backup:
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else {
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+                                                          NSUserDomainMask,
+                                                          YES) objectAtIndex:0];
+    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+    [self addSkipBackupAttributeToItemAtPath:demo.path];
+    // Some DB logic would go here
+    [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *)filePathString
+{
+    NSURL* URL = [NSURL fileURLWithPath:filePathString];
+    assert([[NSFileManager defaultManager] fileExistsAtPath:[URL path]]);
+
+    NSError *error = nil;
+    BOOL success = [URL setResourceValue:[NSNumber numberWithBool:YES]
+                                  forKey:NSURLIsExcludedFromBackupKey error:&error];
+    if (!success) {
+        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+    }
+    return success;
+}
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville.
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application; however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write-ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/value pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+  also fast but random writes can be slow. You can use `DB.Batch()` or add a
+  write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+  SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+  old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+  transaction has been committed or rolled back then the memory they point to
+  can be reused by a new page or can be unmapped from virtual memory and you'll
+  see an `unexpected fault address` panic when accessing it.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+  buckets that have random inserts will cause your database to have very poor
+  page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+  once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+  page will not split until the transaction is committed. Randomly inserting
+  more than 100,000 key/value pairs into a single new bucket in a single
+  transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+  caching of the data. Typically, the OS will cache as much of the file as it
+  can in memory and will release memory as needed to other processes. This means
+  that Bolt can show very high memory usage when working with large databases.
+  However, this is expected and the OS will release memory as needed. Bolt can
+  handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space.
+  This may be problematic on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+  will be endian-specific. This means that you cannot copy a Bolt file from a
+  little endian machine to a big endian machine and have it work. For most
+  users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+  and return free pages back to the disk. Instead, Bolt maintains a free list
+  of unused pages within its data file. These free pages can be reused by later
+  transactions. This works well for many use cases as databases generally tend
+  to grow. However, it's important to note that deleting large chunks of data
+  will not allow you to reclaim that space on disk.
+
+  For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward. The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two-phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
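+
+For orientation, here is a minimal sketch (the file and bucket names are
+illustrative) that exercises these entry points in order:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    "github.com/boltdb/bolt"
+)
+
+func main() {
+    // Open(): creates the file if needed, locks it, and memory-maps it.
+    db, err := bolt.Open("trace.db", 0600, nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer db.Close()
+
+    // DB.Begin(true): a read-write transaction holding the rwlock.
+    tx, err := db.Begin(true)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer tx.Rollback()
+
+    b, err := tx.CreateBucketIfNotExists([]byte("trace"))
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Bucket.Put() / Bucket.Get(): both traverse the B+tree via a cursor.
+    if err := b.Put([]byte("k"), []byte("v")); err != nil {
+        log.Fatal(err)
+    }
+    fmt.Printf("%s\n", b.Get([]byte("k")))
+
+    // Tx.Commit(): two-phase write of dirty pages, then the new meta page.
+    if err := tx.Commit(); err != nil {
+        log.Fatal(err)
+    }
+}
+```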
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as an optional storage engine and is testing it against Basho-tuned LevelDB.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and API for ipxed.
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL database, using Bolt as optional storage.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using Bolt with bloom filters.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as an optional backend.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [tentacool](https://github.com/optiflows/tentacool) - REST API server to manage system stuff (IP, DNS, Gateway...) on a Linux server.
+* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for the PromDash & Prometheus service monitoring system.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, has a JSON over HTTP API, supports ISO 8601 duration notation, and allows dependent jobs.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+  backed by boltdb.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. + +If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml new file mode 100644 index 00000000..6e26e941 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/appveyor.yml @@ -0,0 +1,18 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\boltdb\bolt + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - go version + - go env + - go get -v -t ./... + +build_script: + - go test -v ./... diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go new file mode 100644 index 00000000..e659bfb9 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_386.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go new file mode 100644 index 00000000..cca6b7eb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 00000000..e659bfb9 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 00000000..6d230935 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 00000000..2b676661 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 00000000..7058c3d7 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 00000000..645ddc3e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 00000000..2dc6be02 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,9 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 00000000..8351e129 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 00000000..f4dd26bb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 00000000..cad62dda --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,89 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. 
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 00000000..307bf2b3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. 
+func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 00000000..d538e6af --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. 
+func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path+lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 00000000..f5044252 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 00000000..d2f8c524 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,748 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. 
+// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } else { + return nil, ErrIncompatibleValue + } + } + + // Create empty, inline bucket. 
+ var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if v == nil { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. 
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + _, _, flags := c.seek(key) + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. + lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. 
+ subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. 
+ if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. 
+ if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go new file mode 100644 index 00000000..1be9f35e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,400 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. 
+func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. 
+func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. 
+func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. 
+ var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go new file mode 100644 index 00000000..501d36aa --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db.go @@ -0,0 +1,993 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch.
Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. 
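A typical call into the Open path above, as a sketch (the file name, mode, and one-second timeout are illustrative only). The Timeout option bounds the flock() wait described in the locking comment instead of blocking indefinitely:

	db, err := bolt.Open("app.db", 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err) // e.g. bolt.ErrTimeout if another writer holds the lock
	}
	defer db.Close()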
+ if info, err := db.file.Stat(); err != nil { + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + return nil, err + } + db.pageSize = int(m.pageSize) + } + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) + + // Mark the database as opened and return. + return db, nil +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. + if err := db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. + if err := mmap(db, size); err != nil { + return err + } + + // Save references to the meta pages. + db.meta0 = db.page(0).meta() + db.meta1 = db.page(1).meta() + + // Validate the meta pages. + if err := db.meta0.validate(); err != nil { + return err + } + if err := db.meta1.validate(); err != nil { + return err + } + + return nil +} + +// munmap unmaps the data file from memory. +func (db *DB) munmap() error { + if err := munmap(db); err != nil { + return fmt.Errorf("unmap error: " + err.Error()) + } + return nil +} + +// mmapSize determines the appropriate size for the mmap given the current size +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. +// Returns an error if the new mmap size is greater than the max allowed. +func (db *DB) mmapSize(size int) (int, error) { + // Double the size from 32KB until 1GB. + for i := uint(15); i <= 30; i++ { + if size <= 1<<i { + return 1 << i, nil + } + } + + // Verify the requested size is not above the maximum allowed. + if size > maxMapSize { + return 0, fmt.Errorf("mmap too large") + } + + // If larger than 1GB then grow by 1GB at a time. + sz := int64(size) + if remainder := sz % int64(maxMmapStep); remainder > 0 { + sz += int64(maxMmapStep) - remainder + } + + // Ensure that the mmap size is a multiple of the page size. + // This should always be true since we're incrementing in MBs. + pageSize := int64(db.pageSize) + if (sz % pageSize) != 0 { + sz = ((sz / pageSize) + 1) * pageSize + } + + // If we've exceeded the max size then only grow up to the max size. + if sz > maxMapSize { + sz = maxMapSize + } + + return int(sz), nil +} + +// init creates a new database file and initializes its meta pages. +func (db *DB) init() error { + // Set the page size to the OS page size. + db.pageSize = os.Getpagesize() + + // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4) + for i := 0; i < 2; i++ { + p := db.pageInBuffer(buf[:], pgid(i)) + p.id = pgid(i) + p.flags = metaPageFlag + + // Initialize the meta page. + m := p.meta() + m.magic = magic + m.version = version + m.pageSize = uint32(db.pageSize) + m.freelist = 2 + m.root = bucket{root: 3} + m.pgid = 4 + m.txid = txid(i) + } + + // Write an empty freelist at page 3. + p := db.pageInBuffer(buf[:], pgid(2)) + p.id = pgid(2) + p.flags = freelistPageFlag + p.count = 0 + + // Write an empty leaf page at page 4. + p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.RLock() + defer db.mmaplock.RUnlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + db.path = "" + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. 
+ t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + db.txs = append(db.txs[:i], db.txs[i+1:]...) + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. 
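A managed read-write transaction under the Update contract just described (a sketch; bucket and key names are hypothetical). Returning a non-nil error from the closure rolls the entire transaction back:

	err := db.Update(func(tx *bolt.Tx) error {
		// CreateBucketIfNotExists and Put are defined elsewhere in this
		// vendored package (tx.go and bucket.go).
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})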
+ err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similarly to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in the +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter to re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle.
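A Batch sketch matching the contract documented above (names are hypothetical). Because the closure may run more than once, it performs only an idempotent Put; concurrent goroutines issuing calls like this can be coalesced into a single transaction:

	go func() {
		err := db.Batch(func(tx *bolt.Tx) error {
			return tx.Bucket([]byte("widgets")).Put([]byte("k"), []byte("v"))
		})
		if err != nil {
			log.Println(err)
		}
	}()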
+// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + if db.meta0.txid > db.meta1.txid { + return db.meta0 + } + return db.meta1 +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + buf := make([]byte, count*db.pageSize) + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. 
+ MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it has no effect. + InitialMmapSize int +} + +// DefaultOptions represents the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points in time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } else if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...)
} +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go new file mode 100644 index 00000000..cc937845 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go new file mode 100644 index 00000000..6883786d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/errors.go @@ -0,0 +1,70 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when a data file is not a Bolt-formatted database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). 
+ ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying to create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go new file mode 100644 index 00000000..0161948f --- /dev/null +++ b/vendor/github.com/boltdb/bolt/freelist.go @@ -0,0 +1,242 @@ +package bolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + ids []pgid // all free and available free page ids. + pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist() *freelist { + return &freelist{ + pending: make(map[txid][]pgid), + cache: make(map[pgid]bool), + } +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// free_count returns count of free pages +func (f *freelist) free_count() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, list := range f.pending { + count += len(list) + } + return count +} + +// all returns a list of all free ids and all pending ids in one sorted list. +func (f *freelist) all() []pgid { + m := make(pgids, 0) + + for _, list := range f.pending { + m = append(m, list...)
+ } + + sort.Sort(m) + return pgids(f.ids).merge(m) +} + +// allocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) allocate(n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. 
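Both all() and release() above finish by calling pgids.merge, which lives in page.go rather than in this hunk. Conceptually it is an ordinary merge of two sorted pgid slices, roughly as below (an editorial sketch, not the vendored implementation):

	func mergeSorted(a, b pgids) pgids {
		merged := make(pgids, 0, len(a)+len(b))
		for len(a) > 0 && len(b) > 0 {
			// Take the smaller head element first to keep the result sorted.
			if a[0] <= b[0] {
				merged, a = append(merged, a[0]), a[1:]
			} else {
				merged, b = append(merged, b[0]), b[1:]
			}
		}
		// One side may still have a sorted tail; append it wholesale.
		return append(append(merged, a...), b...)
	}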
+ ids := f.all() + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 00000000..e9d64af8 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,599 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. 
+func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. 
+ if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only called from splitTwo().
+func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. 
+ child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. 
+ for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 00000000..818aa1b1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,172 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. 
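+//
+// Editor's sketch of the on-page layout the accessors below assume (derived
+// from node.write above, not upstream comment text): elements form a
+// fixed-size array immediately after the page header, and each element's
+// pos field is the byte offset from that element's own address to its key,
+// so key/value bytes are addressed relative to the element rather than the
+// page start:
+//
+//	| page header | elem 0 | elem 1 | ... | key0 val0 key1 val1 ... |
+//	                ^-------- elem0.pos --------^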
+type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + merged = append(merged, follow...) + + return merged +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 00000000..b8510fdb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,666 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. 
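+	//
+	// Editor's sketch (not upstream code; Linux-specific, since O_DIRECT
+	// is platform dependent, and w stands in for any io.Writer):
+	//
+	//	tx, _ := db.Begin(false)          // read-only tx for the copy
+	//	defer tx.Rollback()               // read-only txs are rolled back, not committed
+	//	tx.WriteFlag = syscall.O_DIRECT   // bypass the OS page cache
+	//	_, err := tx.WriteTo(w)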
+ WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. 
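+//
+// Editor's sketch (not upstream text): the usual manual pattern pairs
+// Commit with a deferred Rollback; once Commit succeeds, the deferred
+// Rollback merely returns ErrTxClosed, which the defer discards. Assumes
+// db is an open *DB:
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()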
+func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. 
+ tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + for _, id := range tx.db.freelist.all() { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. 
+ for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Clear out page cache. + tx.pages = make(map[pgid]*page) + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. 
+	tx.stats.Write++
+
+	return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+	// Check the dirty pages first.
+	if tx.pages != nil {
+		if p, ok := tx.pages[id]; ok {
+			return p
+		}
+	}
+
+	// Otherwise return directly from the mmap.
+	return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+	p := tx.page(pgid)
+
+	// Execute function.
+	fn(p, depth)
+
+	// Recursively loop over children.
+	if (p.flags & branchPageFlag) != 0 {
+		for i := 0; i < int(p.count); i++ {
+			elem := p.branchPageElement(uint16(i))
+			tx.forEachPage(elem.pgid, depth+1, fn)
+		}
+	}
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
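+//
+// Editor's sketch (not upstream code), assuming an open *DB whose Stats()
+// result carries this TxStats as its TxStats field:
+//
+//	prev := db.Stats().TxStats
+//	time.Sleep(10 * time.Second)
+//	cur := db.Stats().TxStats
+//	diff := cur.Sub(&prev)
+//	fmt.Printf("spills in window: %d\n", diff.Spill)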
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/candiedyaml_suite_test.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/candiedyaml_suite_test.go deleted file mode 100644 index 0b97fe94..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/candiedyaml_suite_test.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestCandiedyaml(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Candiedyaml Suite") -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode_test.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode_test.go deleted file mode 100644 index dd180b57..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode_test.go +++ /dev/null @@ -1,915 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "math" - "os" - "strconv" - "strings" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Decode", func() { - It("Decodes a file", func() { - f, _ := os.Open("fixtures/specification/example2_1.yaml") - d := NewDecoder(f) - var v interface{} - err := d.Decode(&v) - - Expect(err).NotTo(HaveOccurred()) - }) - - Context("strings", func() { - It("Decodes an empty string", func() { - d := NewDecoder(strings.NewReader(`"" -`)) - var v string - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal("")) - }) - - It("Decodes an empty string to an interface", func() { - d := NewDecoder(strings.NewReader(`"" -`)) - var v interface{} - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal("")) - }) - - It("Decodes a map containing empty strings to an interface", func() { - d := NewDecoder(strings.NewReader(`"" : "" -`)) - var v interface{} - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[interface{}]interface{}{"": ""})) - }) - - It("Decodes strings starting with a colon", func() { - d := NewDecoder(strings.NewReader(`:colon -`)) - var v interface{} - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(":colon")) - }) - }) - - Context("Sequence", func() { - It("Decodes to interface{}s", func() { - f, _ := os.Open("fixtures/specification/example2_1.yaml") - d := NewDecoder(f) - var v interface{} - err := d.Decode(&v) - - Expect(err).NotTo(HaveOccurred()) - Expect((v).([]interface{})).To(Equal([]interface{}{"Mark McGwire", "Sammy Sosa", "Ken Griffey"})) - }) - - It("Decodes to []string", func() { - f, _ := os.Open("fixtures/specification/example2_1.yaml") - d := NewDecoder(f) - v := make([]string, 0, 3) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal([]string{"Mark McGwire", "Sammy Sosa", "Ken Griffey"})) - }) - - It("Decodes a sequence of maps", func() { - f, _ := os.Open("fixtures/specification/example2_12.yaml") - d := NewDecoder(f) - v := make([]map[string]interface{}, 1) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal([]map[string]interface{}{ - {"item": "Super Hoop", "quantity": int64(1)}, - {"item": "Basketball", "quantity": int64(4)}, - {"item": "Big Shoes", "quantity": int64(1)}, - })) - - }) - - Describe("As structs", func() { - It("Simple struct", func() { - f, _ := os.Open("fixtures/specification/example2_4.yaml") - d := NewDecoder(f) - - type batter struct { - Name string - HR int64 - AVG float64 - } - v := make([]batter, 0, 1) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal([]batter{ - batter{Name: "Mark McGwire", HR: 65, AVG: 0.278}, - batter{Name: "Sammy Sosa", HR: 63, AVG: 0.288}, - })) - - }) - - It("Tagged struct", func() { - f, _ := os.Open("fixtures/specification/example2_4.yaml") - d := NewDecoder(f) - - type batter struct { - N string `yaml:"name"` - H int64 `yaml:"hr"` - A float64 `yaml:"avg"` - } - v := make([]batter, 0, 1) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal([]batter{ - batter{N: "Mark McGwire", H: 65, A: 0.278}, - batter{N: "Sammy Sosa", H: 63, A: 0.288}, - })) - - }) - - It("handles null values", func() { - type S struct { - Default interface{} - } - - d := NewDecoder(strings.NewReader(` ---- -default: -`)) - var s S - err := d.Decode(&s) - Expect(err).NotTo(HaveOccurred()) - Expect(s).To(Equal(S{Default: nil})) - - }) - - It("ignores missing tags", func() { - f, _ := os.Open("fixtures/specification/example2_4.yaml") - d := NewDecoder(f) - - type batter struct { - N string 
`yaml:"name"` - HR int64 - A float64 - } - v := make([]batter, 0, 1) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal([]batter{ - batter{N: "Mark McGwire", HR: 65}, - batter{N: "Sammy Sosa", HR: 63}, - })) - - }) - }) - - It("Decodes a sequence of sequences", func() { - f, _ := os.Open("fixtures/specification/example2_5.yaml") - d := NewDecoder(f) - v := make([][]interface{}, 1) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal([][]interface{}{ - {"name", "hr", "avg"}, - {"Mark McGwire", int64(65), float64(0.278)}, - {"Sammy Sosa", int64(63), float64(0.288)}, - })) - - }) - }) - - Context("Maps", func() { - It("Decodes to interface{}s", func() { - f, _ := os.Open("fixtures/specification/example2_2.yaml") - d := NewDecoder(f) - var v interface{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect((v).(map[interface{}]interface{})).To(Equal(map[interface{}]interface{}{ - "hr": int64(65), - "avg": float64(0.278), - "rbi": int64(147), - })) - - }) - - It("Decodes to a struct", func() { - f, _ := os.Open("fixtures/specification/example2_2.yaml") - d := NewDecoder(f) - - type batter struct { - HR int64 - AVG float64 - RBI int64 - } - v := batter{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(batter{HR: 65, AVG: 0.278, RBI: 147})) - }) - - It("Decodes to a map of string arrays", func() { - f, _ := os.Open("fixtures/specification/example2_9.yaml") - d := NewDecoder(f) - v := make(map[string][]string) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string][]string{"hr": []string{"Mark McGwire", "Sammy Sosa"}, "rbi": []string{"Sammy Sosa", "Ken Griffey"}})) - }) - }) - - Context("Sequence of Maps", func() { - It("Decodes to interface{}s", func() { - f, _ := os.Open("fixtures/specification/example2_4.yaml") - d := NewDecoder(f) - var v interface{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect((v).([]interface{})).To(Equal([]interface{}{ - map[interface{}]interface{}{"name": "Mark McGwire", "hr": int64(65), "avg": float64(0.278)}, - map[interface{}]interface{}{"name": "Sammy Sosa", "hr": int64(63), "avg": float64(0.288)}, - })) - - }) - }) - - It("Decodes ascii art", func() { - f, _ := os.Open("fixtures/specification/example2_13.yaml") - d := NewDecoder(f) - v := "" - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(`\//||\/|| -// || ||__ -`)) - - }) - - It("Decodes folded strings", func() { - f, _ := os.Open("fixtures/specification/example2_15.yaml") - d := NewDecoder(f) - v := "" - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal("Sammy Sosa completed another fine season with great stats.\n\n 63 Home Runs\n 0.288 Batting Average\n\nWhat a year!\n")) - }) - - It("Decodes literal and folded strings with indents", func() { - f, _ := os.Open("fixtures/specification/example2_16.yaml") - d := NewDecoder(f) - v := make(map[string]string) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]string{ - "name": "Mark McGwire", - "accomplishment": `Mark set a major league home run record in 1998. 
-`, - "stats": `65 Home Runs -0.278 Batting Average -`, - })) - - }) - - It("Decodes single quoted", func() { - f, _ := os.Open("fixtures/specification/example2_17_quoted.yaml") - d := NewDecoder(f) - v := make(map[string]string) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]string{ - "quoted": ` # not a 'comment'.`, - })) - - }) - - Context("ints", func() { - It("Decodes into an interface{}", func() { - f, _ := os.Open("fixtures/specification/example2_19.yaml") - d := NewDecoder(f) - v := make(map[string]interface{}) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "canonical": int64(12345), - "decimal": int64(12345), - "octal": int64(12), - "hexadecimal": int64(12), - })) - - }) - - It("Decodes into int64", func() { - f, _ := os.Open("fixtures/specification/example2_19.yaml") - d := NewDecoder(f) - v := make(map[string]int64) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]int64{ - "canonical": int64(12345), - "decimal": int64(12345), - "octal": int64(12), - "hexadecimal": int64(12), - })) - - }) - - Context("boundary values", func() { - intoInt64 := func(val int64) { - It("Decodes into an int64 value", func() { - var v int64 - - d := NewDecoder(strings.NewReader(strconv.FormatInt(val, 10))) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(val)) - - }) - } - - intoInt := func(val int) { - It("Decodes into an int value", func() { - var v int - - d := NewDecoder(strings.NewReader(strconv.FormatInt(int64(val), 10))) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(val)) - - }) - } - - intoInterface := func(val int64) { - It("Decodes into an interface{}", func() { - var v interface{} - - d := NewDecoder(strings.NewReader(strconv.FormatInt(val, 10))) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(val)) - }) - } - - intoInt64(math.MaxInt64) - intoInterface(math.MaxInt64) - - intoInt64(math.MinInt64) - intoInterface(math.MinInt64) - - intoInt(math.MaxInt32) - intoInt(math.MinInt32) - }) - }) - - It("Decodes a variety of floats", func() { - f, _ := os.Open("fixtures/specification/example2_20.yaml") - d := NewDecoder(f) - v := make(map[string]float64) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - - Expect(math.IsNaN(v["not a number"])).To(BeTrue()) - delete(v, "not a number") - - Expect(v).To(Equal(map[string]float64{ - "canonical": float64(1230.15), - "exponential": float64(1230.15), - "fixed": float64(1230.15), - "negative infinity": math.Inf(-1), - })) - - }) - - It("Decodes booleans, nil and strings", func() { - f, _ := os.Open("fixtures/specification/example2_21.yaml") - d := NewDecoder(f) - v := make(map[string]interface{}) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "": interface{}(nil), - "true": true, - "false": false, - "string": "12345", - })) - - }) - - It("Decodes a null ptr", func() { - d := NewDecoder(strings.NewReader(`null -`)) - var v *bool - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(BeNil()) - }) - - It("Decodes dates/time", func() { - f, _ := os.Open("fixtures/specification/example2_22.yaml") - d := NewDecoder(f) - v := make(map[string]time.Time) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]time.Time{ - "canonical": time.Date(2001, time.December, 15, 2, 59, 43, int(1*time.Millisecond), 
time.UTC), - "iso8601": time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600)), - "spaced": time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600)), - "date": time.Date(2002, time.December, 14, 0, 0, 0, 0, time.UTC), - })) - - }) - - Context("Tags", func() { - It("Respects tags", func() { - f, _ := os.Open("fixtures/specification/example2_23_non_date.yaml") - d := NewDecoder(f) - v := make(map[string]string) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]string{ - "not-date": "2002-04-28", - })) - - }) - - It("handles non-specific tags", func() { - d := NewDecoder(strings.NewReader(` ---- -not_parsed: ! 123 -`)) - v := make(map[string]int) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]int{"not_parsed": 123})) - }) - - It("handles non-specific tags", func() { - d := NewDecoder(strings.NewReader(` ---- -? a complex key -: ! "123" -`)) - v := make(map[string]string) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]string{"a complex key": "123"})) - }) - }) - - Context("Decodes binary/base64", func() { - It("to []byte", func() { - f, _ := os.Open("fixtures/specification/example2_23_picture.yaml") - d := NewDecoder(f) - v := make(map[string][]byte) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string][]byte{ - "picture": []byte{0x47, 0x49, 0x46, 0x38, 0x39, 0x61, 0x0c, 0x00, - 0x0c, 0x00, 0x84, 0x00, 0x00, 0xff, 0xff, 0xf7, 0xf5, 0xf5, 0xee, - 0xe9, 0xe9, 0xe5, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0xe7, 0xe7, - 0xe7, 0x5e, 0x5e, 0x5e, 0xf3, 0xf3, 0xed, 0x8e, 0x8e, 0x8e, 0xe0, - 0xe0, 0xe0, 0x9f, 0x9f, 0x9f, 0x93, 0x93, 0x93, 0xa7, 0xa7, 0xa7, - 0x9e, 0x9e, 0x9e, 0x69, 0x5e, 0x10, 0x27, 0x20, 0x82, 0x0a, 0x01, - 0x00, 0x3b}, - })) - - }) - - It("to string", func() { - d := NewDecoder(strings.NewReader("!binary YWJjZGVmZw==")) - var v string - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal("abcdefg")) - }) - - It("to string via alternate form", func() { - d := NewDecoder(strings.NewReader("!!binary YWJjZGVmZw==")) - var v string - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal("abcdefg")) - }) - - It("to interface", func() { - d := NewDecoder(strings.NewReader("!binary YWJjZGVmZw==")) - var v interface{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal([]byte("abcdefg"))) - }) - }) - - Context("Aliases", func() { - Context("to known types", func() { - It("aliases scalars", func() { - f, _ := os.Open("fixtures/specification/example2_10.yaml") - d := NewDecoder(f) - v := make(map[string][]string) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string][]string{ - "hr": {"Mark McGwire", "Sammy Sosa"}, - "rbi": {"Sammy Sosa", "Ken Griffey"}, - })) - - }) - - It("aliases sequences", func() { - d := NewDecoder(strings.NewReader(` ---- -hr: &ss - - MG - - SS -rbi: *ss -`)) - v := make(map[string][]string) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string][]string{ - "hr": {"MG", "SS"}, - "rbi": {"MG", "SS"}, - })) - - }) - - It("aliases maps", func() { - d := NewDecoder(strings.NewReader(` ---- -hr: &ss - MG : SS -rbi: *ss -`)) - v := make(map[string]map[string]string) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]map[string]string{ - 
"hr": {"MG": "SS"}, - "rbi": {"MG": "SS"}, - })) - - }) - }) - - It("aliases to different types", func() { - type S struct { - A map[string]int - C map[string]string - } - d := NewDecoder(strings.NewReader(` ---- -a: &map - b : 1 -c: *map -`)) - var s S - err := d.Decode(&s) - Expect(err).NotTo(HaveOccurred()) - Expect(s).To(Equal(S{ - A: map[string]int{"b": 1}, - C: map[string]string{"b": "1"}, - })) - - }) - - It("fails if an anchor is undefined", func() { - d := NewDecoder(strings.NewReader(` ---- -a: *missing -`)) - m := make(map[string]string) - err := d.Decode(&m) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(MatchRegexp("missing anchor.*line.*column.*")) - }) - - Context("to Interface", func() { - It("aliases scalars", func() { - f, _ := os.Open("fixtures/specification/example2_10.yaml") - d := NewDecoder(f) - v := make(map[string]interface{}) - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "hr": []interface{}{"Mark McGwire", "Sammy Sosa"}, - "rbi": []interface{}{"Sammy Sosa", "Ken Griffey"}, - })) - - }) - - It("aliases sequences", func() { - d := NewDecoder(strings.NewReader(` ---- -hr: &ss - - MG - - SS -rbi: *ss -`)) - v := make(map[string]interface{}) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "hr": []interface{}{"MG", "SS"}, - "rbi": []interface{}{"MG", "SS"}, - })) - - }) - - It("aliases maps", func() { - d := NewDecoder(strings.NewReader(` ---- -hr: &ss - MG : SS -rbi: *ss -`)) - v := make(map[string]interface{}) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "hr": map[interface{}]interface{}{"MG": "SS"}, - "rbi": map[interface{}]interface{}{"MG": "SS"}, - })) - - }) - - It("supports duplicate aliases", func() { - d := NewDecoder(strings.NewReader(` ---- -a: &a - b: 1 -x: *a -y: *a -`)) - v := make(map[string]interface{}) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "a": map[interface{}]interface{}{"b": int64(1)}, - "x": map[interface{}]interface{}{"b": int64(1)}, - "y": map[interface{}]interface{}{"b": int64(1)}, - })) - - }) - - It("supports overriden anchors", func() { - d := NewDecoder(strings.NewReader(` ---- -First occurrence: &anchor Foo -Second occurrence: *anchor -Override anchor: &anchor Bar -Reuse anchor: *anchor -`)) - v := make(map[string]interface{}) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "First occurrence": "Foo", - "Second occurrence": "Foo", - "Override anchor": "Bar", - "Reuse anchor": "Bar", - })) - - }) - - It("fails if an anchor is undefined", func() { - d := NewDecoder(strings.NewReader(` ---- -a: *missing -`)) - var i interface{} - err := d.Decode(&i) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(MatchRegexp("missing anchor.*line.*column.*")) - }) - - }) - - It("supports composing aliases", func() { - d := NewDecoder(strings.NewReader(` ---- -a: &a b -x: &b - d: *a -z: *b -`)) - v := make(map[string]interface{}) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "a": "b", - "x": map[interface{}]interface{}{"d": "b"}, - "z": map[interface{}]interface{}{"d": "b"}, - })) - - }) - - It("redefinition while composing aliases", func() { - d := NewDecoder(strings.NewReader(` ---- -a: &a b -x: &c - d : &a 1 -y: *a -`)) - v := make(map[string]interface{}) - err := d.Decode(&v) - 
Expect(err).NotTo(HaveOccurred()) - Expect(v).To(Equal(map[string]interface{}{ - "a": "b", - "x": map[interface{}]interface{}{"d": int64(1)}, - "y": int64(1), - })) - - }) - - It("can parse nested anchors", func() { - d := NewDecoder(strings.NewReader(` ---- -a: - aa: &x - aaa: 1 - ab: - aba: &y - abaa: - abaaa: *x -b: -- ba: - baa: *y -`)) - v := make(map[string]interface{}) - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - }) - }) - - Context("When decoding fails", func() { - It("returns an error", func() { - f, _ := os.Open("fixtures/specification/example_empty.yaml") - d := NewDecoder(f) - var v interface{} - - err := d.Decode(&v) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Expected document start at line 0, column 0")) - }) - }) - - Context("Unmarshaler support", func() { - Context("Receiver is a value", func() { - It("the Marshaler interface is not used", func() { - d := NewDecoder(strings.NewReader("abc\n")) - v := hasMarshaler{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v.Value).To(BeNil()) - }) - }) - - Context("Receiver is a pointer", func() { - It("uses the Marshaler interface when a pointer", func() { - d := NewDecoder(strings.NewReader("abc\n")) - v := hasPtrMarshaler{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - }) - - It("marshals a scalar", func() { - d := NewDecoder(strings.NewReader("abc\n")) - v := hasPtrMarshaler{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v.Tag).To(Equal(yaml_STR_TAG)) - Expect(v.Value).To(Equal("abc")) - }) - - It("marshals a sequence", func() { - d := NewDecoder(strings.NewReader("[abc, def]\n")) - v := hasPtrMarshaler{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v.Tag).To(Equal(yaml_SEQ_TAG)) - Expect(v.Value).To(Equal([]interface{}{"abc", "def"})) - }) - - It("marshals a map", func() { - d := NewDecoder(strings.NewReader("{ a: bc}\n")) - v := hasPtrMarshaler{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v.Tag).To(Equal(yaml_MAP_TAG)) - Expect(v.Value).To(Equal(map[interface{}]interface{}{"a": "bc"})) - }) - }) - }) - - Context("Marshals into a Number", func() { - It("when the number is an int", func() { - d := NewDecoder(strings.NewReader("123\n")) - d.UseNumber() - var v Number - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v.String()).To(Equal("123")) - }) - - It("when the number is an float", func() { - d := NewDecoder(strings.NewReader("1.23\n")) - d.UseNumber() - var v Number - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v.String()).To(Equal("1.23")) - }) - - It("it fails when its a non-Number", func() { - d := NewDecoder(strings.NewReader("on\n")) - d.UseNumber() - var v Number - - err := d.Decode(&v) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(MatchRegexp("Not a number: 'on' at line 0, column 0")) - }) - - It("returns a Number", func() { - d := NewDecoder(strings.NewReader("123\n")) - d.UseNumber() - var v interface{} - - err := d.Decode(&v) - Expect(err).NotTo(HaveOccurred()) - Expect(v).To(BeAssignableToTypeOf(Number(""))) - - n := v.(Number) - Expect(n.String()).To(Equal("123")) - }) - }) - Context("When there are special characters", func() { - It("returns an error", func() { - d := NewDecoder(strings.NewReader(` ---- -applications: - - name: m - services: - - !@# -`)) - var v interface{} - - err := d.Decode(&v) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(MatchRegexp("yaml.*did not find.*line.*column.*")) - }) - }) 
-}) diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode_test.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode_test.go deleted file mode 100644 index 82563c79..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode_test.go +++ /dev/null @@ -1,642 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" - "errors" - "math" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("Encode", func() { - var buf *bytes.Buffer - var enc *Encoder - - BeforeEach(func() { - buf = &bytes.Buffer{} - enc = NewEncoder(buf) - }) - - Context("Scalars", func() { - It("handles strings", func() { - err := enc.Encode("abc") - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`abc -`)) - - }) - - It("handles really short strings", func() { - err := enc.Encode(".") - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`. -`)) - - }) - - It("encodes strings with multilines", func() { - err := enc.Encode("a\nc") - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`|- - a - c -`)) - - }) - - It("handles strings that match known scalars", func() { - err := enc.Encode("true") - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`"true" -`)) - - }) - - It("handles strings that contain colons followed by whitespace", func() { - err := enc.Encode("contains: colon") - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`'contains: colon' -`)) - - }) - - Context("handles ints", func() { - It("handles ints", func() { - err := enc.Encode(13) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("13\n")) - }) - - It("handles uints", func() { - err := enc.Encode(uint64(1)) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("1\n")) - }) - }) - - Context("handles floats", func() { - It("handles float32", func() { - err := enc.Encode(float32(1.234)) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("1.234\n")) - - }) - - It("handles float64", func() { - err := enc.Encode(float64(1.2e23)) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("1.2e+23\n")) - }) - - It("handles NaN", func() { - err := enc.Encode(math.NaN()) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(".nan\n")) - }) - - It("handles infinity", func() { - err := enc.Encode(math.Inf(-1)) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("-.inf\n")) - }) - }) - - It("handles bools", func() { - err := enc.Encode(true) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("true\n")) - }) - - It("handles time.Time", func() { - t := time.Now() - err := enc.Encode(t) - Expect(err).NotTo(HaveOccurred()) - bytes, _ := t.MarshalText() - Expect(buf.String()).To(Equal(string(bytes) + "\n")) - }) - - Context("Null", func() { - It("fails on nil", func() { - err := enc.Encode(nil) - Expect(err).To(HaveOccurred()) - }) - }) - - It("handles []byte", func() { - err := 
enc.Encode([]byte{'a', 'b', 'c'}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("!!binary YWJj\n")) - }) - - Context("Ptrs", func() { - It("handles ptr of a type", func() { - p := new(int) - *p = 10 - err := enc.Encode(p) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("10\n")) - }) - - It("handles nil ptr", func() { - var p *int - err := enc.Encode(p) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("null\n")) - }) - }) - - Context("Structs", func() { - It("handles simple structs", func() { - type batter struct { - Name string - HR int64 - AVG float64 - } - - batters := []batter{ - batter{Name: "Mark McGwire", HR: 65, AVG: 0.278}, - batter{Name: "Sammy Sosa", HR: 63, AVG: 0.288}, - } - err := enc.Encode(batters) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`- Name: Mark McGwire - HR: 65 - AVG: 0.278 -- Name: Sammy Sosa - HR: 63 - AVG: 0.288 -`)) - - }) - - It("handles tagged structs", func() { - type batter struct { - Name string `yaml:"name"` - HR int64 - AVG float64 `yaml:"avg"` - } - - batters := []batter{ - batter{Name: "Mark McGwire", HR: 65, AVG: 0.278}, - batter{Name: "Sammy Sosa", HR: 63, AVG: 0.288}, - } - err := enc.Encode(batters) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`- name: Mark McGwire - HR: 65 - avg: 0.278 -- name: Sammy Sosa - HR: 63 - avg: 0.288 -`)) - - }) - - It("handles nested structs", func() { - type nestedConfig struct { - AString string `yaml:"str"` - Integer int `yaml:"int"` - } - type config struct { - TopString string - Nested nestedConfig - } - - cfg := config{ - TopString: "def", - Nested: nestedConfig{ - AString: "abc", - Integer: 123, - }, - } - - err := enc.Encode(cfg) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`TopString: def -Nested: - str: abc - int: 123 -`)) - - }) - - It("handles inline structs", func() { - type NestedConfig struct { - AString string `yaml:"str"` - Integer int `yaml:"int"` - } - type config struct { - TopString string - NestedConfig - } - - cfg := config{ - TopString: "def", - NestedConfig: NestedConfig{ - AString: "abc", - Integer: 123, - }, - } - - err := enc.Encode(cfg) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`TopString: def -str: abc -int: 123 -`)) - - }) - - It("handles inline structs with conflicts", func() { - type NestedConfig struct { - AString string `yaml:"str"` - Integer int `yaml:"int"` - } - type config struct { - AString string `yaml:"str"` - NestedConfig - } - - cfg := config{ - AString: "def", - NestedConfig: NestedConfig{ - AString: "abc", - Integer: 123, - }, - } - - err := enc.Encode(cfg) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`str: def -int: 123 -`)) - - }) - - }) - - }) - - Context("Sequence", func() { - It("handles slices", func() { - val := []string{"a", "b", "c"} - err := enc.Encode(val) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`- a -- b -- c -`)) - - }) - }) - - Context("Maps", func() { - It("Encodes simple maps", func() { - err := enc.Encode(&map[string]string{ - "name": "Mark McGwire", - "hr": "65", - "avg": "0.278", - }) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`avg: "0.278" -hr: "65" -name: Mark McGwire -`)) - }) - - It("sorts by key when strings otherwise by kind", func() { - err := enc.Encode(&map[interface{}]string{ - 1.2: "float", - 8: "integer", - "name": "Mark McGwire", - "hr": "65", - "avg": "0.278", - }) - Expect(err).NotTo(HaveOccurred()) - 
- Expect(buf.String()).To(Equal(`8: integer -1.2: float -avg: "0.278" -hr: "65" -name: Mark McGwire -`)) - }) - - It("encodes mix types", func() { - err := enc.Encode(&map[string]interface{}{ - "name": "Mark McGwire", - "hr": 65, - "avg": 0.278, - }) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`avg: 0.278 -hr: 65 -name: Mark McGwire -`)) - }) - }) - - Context("Sequence of Maps", func() { - It("encodes", func() { - err := enc.Encode([]map[string]interface{}{ - {"name": "Mark McGwire", - "hr": 65, - "avg": 0.278, - }, - {"name": "Sammy Sosa", - "hr": 63, - "avg": 0.288, - }, - }) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`- avg: 0.278 - hr: 65 - name: Mark McGwire -- avg: 0.288 - hr: 63 - name: Sammy Sosa -`)) - - }) - }) - - Context("Maps of Sequence", func() { - It("encodes", func() { - err := enc.Encode(map[string][]interface{}{ - "name": []interface{}{"Mark McGwire", "Sammy Sosa"}, - "hr": []interface{}{65, 63}, - "avg": []interface{}{0.278, 0.288}, - }) - Expect(err).NotTo(HaveOccurred()) - - Expect(buf.String()).To(Equal(`avg: -- 0.278 -- 0.288 -hr: -- 65 -- 63 -name: -- Mark McGwire -- Sammy Sosa -`)) - - }) - }) - - Context("Flow", func() { - It("flows structs", func() { - type i struct { - A string - } - type o struct { - I i `yaml:"i,flow"` - } - - err := enc.Encode(o{ - I: i{A: "abc"}, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`i: {A: abc} -`)) - - }) - - It("flows sequences", func() { - type i struct { - A string - } - type o struct { - I []i `yaml:"i,flow"` - } - - err := enc.Encode(o{ - I: []i{{A: "abc"}}, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`i: [{A: abc}] -`)) - - }) - }) - - Context("Omit empty", func() { - It("omits nil ptrs", func() { - type i struct { - A *string `yaml:"a,omitempty"` - } - type o struct { - I []i `yaml:"i,flow"` - } - - err := enc.Encode(o{ - I: []i{{A: nil}}, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`i: [{}] -`)) - - }) - - }) - - Context("Skip field", func() { - It("does not include the field", func() { - type a struct { - B string `yaml:"-"` - C string - } - - err := enc.Encode(a{B: "b", C: "c"}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`C: c -`)) - - }) - }) - - Context("Marshaler support", func() { - Context("Receiver is a value", func() { - It("uses the Marshaler interface when a value", func() { - err := enc.Encode(hasMarshaler{Value: 123}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("123\n")) - }) - - It("uses the Marshaler interface when a pointer", func() { - err := enc.Encode(&hasMarshaler{Value: "abc"}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`abc -`)) - }) - - Context("when it fails", func() { - It("returns an error", func() { - err := enc.Encode(&hasMarshaler{Value: "abc", Error: errors.New("fail")}) - Expect(err).To(MatchError("fail")) - }) - }) - }) - - Context("Receiver is a pointer", func() { - It("uses the Marshaler interface when a pointer", func() { - err := enc.Encode(&hasPtrMarshaler{Value: map[string]string{"a": "b"}}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`a: b -`)) - - }) - - It("skips the Marshaler when its a value", func() { - err := enc.Encode(hasPtrMarshaler{Value: map[string]string{"a": "b"}}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`Tag: "" -Value: - a: b -Error: null -`)) - - }) - - Context("the receiver is nil", func() { - var ptr 
*hasPtrMarshaler - - Context("when it fails", func() { - It("returns an error", func() { - err := enc.Encode(&hasPtrMarshaler{Value: "abc", Error: errors.New("fail")}) - Expect(err).To(MatchError("fail")) - }) - }) - - It("returns a null", func() { - err := enc.Encode(ptr) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`null -`)) - - }) - - It("returns a null value for ptr types", func() { - err := enc.Encode(map[string]*hasPtrMarshaler{"a": ptr}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`a: null -`)) - - }) - - It("panics when used as a nil interface", func() { - Expect(func() { enc.Encode(map[string]Marshaler{"a": ptr}) }).To(Panic()) - }) - }) - - Context("the receiver has a nil value", func() { - ptr := &hasPtrMarshaler{Value: nil} - - It("returns null", func() { - err := enc.Encode(ptr) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`null -`)) - - }) - - Context("in a map", func() { - It("returns a null value for ptr types", func() { - err := enc.Encode(map[string]*hasPtrMarshaler{"a": ptr}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`a: null -`)) - - }) - - It("returns a null value for interface types", func() { - err := enc.Encode(map[string]Marshaler{"a": ptr}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`a: null -`)) - - }) - }) - - Context("in a slice", func() { - It("returns a null value for ptr types", func() { - err := enc.Encode([]*hasPtrMarshaler{ptr}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`- null -`)) - - }) - - It("returns a null value for interface types", func() { - err := enc.Encode([]Marshaler{ptr}) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal(`- null -`)) - - }) - }) - }) - }) - }) - - Context("Number type", func() { - It("encodes as a number", func() { - n := Number("12345") - err := enc.Encode(n) - Expect(err).NotTo(HaveOccurred()) - Expect(buf.String()).To(Equal("12345\n")) - }) - }) -}) - -type hasMarshaler struct { - Value interface{} - Error error -} - -func (m hasMarshaler) MarshalYAML() (string, interface{}, error) { - return "", m.Value, m.Error -} - -func (m hasMarshaler) UnmarshalYAML(tag string, value interface{}) error { - m.Value = value - return nil -} - -type hasPtrMarshaler struct { - Tag string - Value interface{} - Error error -} - -func (m *hasPtrMarshaler) MarshalYAML() (string, interface{}, error) { - return "", m.Value, m.Error -} - -func (m *hasPtrMarshaler) UnmarshalYAML(tag string, value interface{}) error { - m.Tag = tag - m.Value = value - return nil -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser_test.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser_test.go deleted file mode 100644 index 534ca5ec..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser_test.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io/ioutil" - "os" - "path/filepath" - - . 
"github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var parses = func(filename string) { - It("parses "+filename, func() { - file, err := os.Open(filename) - Expect(err).To(BeNil()) - - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_reader(&parser, file) - - failed := false - event := yaml_event_t{} - - for { - if !yaml_parser_parse(&parser, &event) { - failed = true - println("---", parser.error, parser.problem, parser.context, "line", parser.problem_mark.line, "col", parser.problem_mark.column) - break - } - - if event.event_type == yaml_STREAM_END_EVENT { - break - } - } - - file.Close() - - // msg := "SUCCESS" - // if failed { - // msg = "FAILED" - // if parser.error != yaml_NO_ERROR { - // m := parser.problem_mark - // fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n", - // parser.context, parser.problem, m.line, m.column) - // } - // } - Expect(failed).To(BeFalse()) - }) -} - -var parseYamls = func(dirname string) { - fileInfos, err := ioutil.ReadDir(dirname) - if err != nil { - panic(err.Error()) - } - - for _, fileInfo := range fileInfos { - if !fileInfo.IsDir() { - parses(filepath.Join(dirname, fileInfo.Name())) - } - } -} - -var _ = Describe("Parser", func() { - parseYamls("fixtures/specification") - parseYamls("fixtures/specification/types") -}) diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader_test.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader_test.go deleted file mode 100644 index 3771ee76..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader_test.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - // "fmt" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -/* - * Test cases are stolen from - * http://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt - */ - -type test_case struct { - title string - test string - result bool -} - -var _ = Describe("Reader", func() { - LONG := 100000 - - Context("UTF8 Sequences", func() { - utf8_sequences := []test_case{ - /* {"title", "test 1|test 2|...|test N!", (0 or 1)}, */ - - {"a simple test", "'test' is '\xd0\xbf\xd1\x80\xd0\xbe\xd0\xb2\xd0\xb5\xd1\x80\xd0\xba\xd0\xb0' in Russian!", true}, - - {"an empty line", "!", true}, - {"u-0 is a control character", "\x00!", false}, - {"u-80 is a control character", "\xc2\x80!", false}, - {"u-800 is valid", "\xe0\xa0\x80!", true}, - {"u-10000 is valid", "\xf0\x90\x80\x80!", true}, - {"5 bytes sequences are not allowed", "\xf8\x88\x80\x80\x80!", false}, - {"6 bytes sequences are not allowed", "\xfc\x84\x80\x80\x80\x80!", false}, - - {"u-7f is a control character", "\x7f!", false}, - {"u-7FF is valid", "\xdf\xbf!", true}, - {"u-FFFF is a control character", "\xef\xbf\xbf!", false}, - {"u-1FFFFF is too large", "\xf7\xbf\xbf\xbf!", false}, - {"u-3FFFFFF is 5 bytes", "\xfb\xbf\xbf\xbf\xbf!", false}, - {"u-7FFFFFFF is 6 bytes", "\xfd\xbf\xbf\xbf\xbf\xbf!", false}, - - {"u-D7FF", "\xed\x9f\xbf!", true}, - {"u-E000", "\xee\x80\x80!", true}, - {"u-FFFD", "\xef\xbf\xbd!", true}, - {"u-10FFFF", "\xf4\x8f\xbf\xbf!", true}, - {"u-110000", "\xf4\x90\x80\x80!", false}, - - {"first continuation byte", "\x80!", false}, - {"last continuation byte", "\xbf!", false}, - - {"2 continuation bytes", "\x80\xbf!", false}, - {"3 continuation bytes", "\x80\xbf\x80!", false}, - {"4 continuation bytes", "\x80\xbf\x80\xbf!", false}, - {"5 continuation bytes", "\x80\xbf\x80\xbf\x80!", false}, - {"6 continuation bytes", "\x80\xbf\x80\xbf\x80\xbf!", false}, - {"7 continuation bytes", "\x80\xbf\x80\xbf\x80\xbf\x80!", false}, - - {"sequence of all 64 possible continuation bytes", - "\x80|\x81|\x82|\x83|\x84|\x85|\x86|\x87|\x88|\x89|\x8a|\x8b|\x8c|\x8d|\x8e|\x8f|" + - "\x90|\x91|\x92|\x93|\x94|\x95|\x96|\x97|\x98|\x99|\x9a|\x9b|\x9c|\x9d|\x9e|\x9f|" + - "\xa0|\xa1|\xa2|\xa3|\xa4|\xa5|\xa6|\xa7|\xa8|\xa9|\xaa|\xab|\xac|\xad|\xae|\xaf|" + - "\xb0|\xb1|\xb2|\xb3|\xb4|\xb5|\xb6|\xb7|\xb8|\xb9|\xba|\xbb|\xbc|\xbd|\xbe|\xbf!", false}, - {"32 first bytes of 2-byte sequences {0xc0-0xdf}", - "\xc0 |\xc1 |\xc2 |\xc3 |\xc4 |\xc5 |\xc6 |\xc7 |\xc8 |\xc9 |\xca |\xcb |\xcc |\xcd |\xce |\xcf |" + - "\xd0 |\xd1 |\xd2 |\xd3 |\xd4 |\xd5 |\xd6 |\xd7 |\xd8 |\xd9 |\xda |\xdb |\xdc |\xdd |\xde |\xdf !", false}, - {"16 first bytes of 3-byte sequences {0xe0-0xef}", - "\xe0 |\xe1 |\xe2 |\xe3 |\xe4 |\xe5 |\xe6 |\xe7 |\xe8 |\xe9 |\xea |\xeb |\xec |\xed |\xee |\xef !", false}, - {"8 first bytes of 4-byte sequences {0xf0-0xf7}", "\xf0 |\xf1 |\xf2 |\xf3 |\xf4 |\xf5 |\xf6 |\xf7 !", false}, - {"4 first bytes of 5-byte sequences {0xf8-0xfb}", "\xf8 |\xf9 |\xfa |\xfb !", false}, - {"2 first bytes of 6-byte sequences {0xfc-0xfd}", "\xfc |\xfd !", false}, - - {"sequences with last byte missing {u-0}", - "\xc0|\xe0\x80|\xf0\x80\x80|\xf8\x80\x80\x80|\xfc\x80\x80\x80\x80!", false}, - {"sequences with last byte missing {u-...FF}", - "\xdf|\xef\xbf|\xf7\xbf\xbf|\xfb\xbf\xbf\xbf|\xfd\xbf\xbf\xbf\xbf!", false}, - - {"impossible bytes", "\xfe|\xff|\xfe\xfe\xff\xff!", false}, - - {"overlong sequences {u-2f}", - "\xc0\xaf|\xe0\x80\xaf|\xf0\x80\x80\xaf|\xf8\x80\x80\x80\xaf|\xfc\x80\x80\x80\x80\xaf!", false}, - - {"maximum overlong sequences", - 
"\xc1\xbf|\xe0\x9f\xbf|\xf0\x8f\xbf\xbf|\xf8\x87\xbf\xbf\xbf|\xfc\x83\xbf\xbf\xbf\xbf!", false}, - - {"overlong representation of the NUL character", - "\xc0\x80|\xe0\x80\x80|\xf0\x80\x80\x80|\xf8\x80\x80\x80\x80|\xfc\x80\x80\x80\x80\x80!", false}, - - {"single UTF-16 surrogates", - "\xed\xa0\x80|\xed\xad\xbf|\xed\xae\x80|\xed\xaf\xbf|\xed\xb0\x80|\xed\xbe\x80|\xed\xbf\xbf!", false}, - - {"paired UTF-16 surrogates", - "\xed\xa0\x80\xed\xb0\x80|\xed\xa0\x80\xed\xbf\xbf|\xed\xad\xbf\xed\xb0\x80|" + - "\xed\xad\xbf\xed\xbf\xbf|\xed\xae\x80\xed\xb0\x80|\xed\xae\x80\xed\xbf\xbf|" + - "\xed\xaf\xbf\xed\xb0\x80|\xed\xaf\xbf\xed\xbf\xbf!", false}, - - {"other illegal code positions", "\xef\xbf\xbe|\xef\xbf\xbf!", false}, - } - - check_sequence := func(tc test_case) { - It(tc.title, func() { - start := 0 - end := start - bytes := []byte(tc.test) - - for { - for bytes[end] != '|' && bytes[end] != '!' { - end++ - } - - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_string(&parser, bytes) - result := yaml_parser_update_buffer(&parser, end-start) - Expect(result).To(Equal(tc.result)) - // outcome := '+' - // if result != tc.result { - // outcome = '-' - // } - // fmt.Printf("\t\t %c %s", outcome, tc.title) - // if parser.error == yaml_NO_ERROR { - // fmt.Printf("(no error)\n") - // } else if parser.error == yaml_READER_ERROR { - // if parser.problem_value != -1 { - // fmt.Printf("(reader error: %s: #%X at %d)\n", - // parser.problem, parser.problem_value, parser.problem_offset) - // } else { - // fmt.Printf("(reader error: %s: at %d)\n", - // parser.problem, parser.problem_offset) - // } - // } - - if bytes[end] == '!' { - break - } - - end++ - start = end - yaml_parser_delete(&parser) - } - }) - } - - for _, test := range utf8_sequences { - check_sequence(test) - } - }) - - Context("BOMs", func() { - boms := []test_case{ - /* {"title", "test!", lenth}, */ - {"no bom (utf-8)", "Hi is \xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!", true}, - {"bom (utf-8)", "\xef\xbb\xbfHi is \xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!", true}, - {"bom (utf-16-le)", "\xff\xfeH\x00i\x00 \x00i\x00s\x00 \x00\x1f\x04@\x04" + "8\x04" + "2\x04" + "5\x04" + "B\x04!", true}, - {"bom (utf-16-be)", "\xfe\xff\x00H\x00i\x00 \x00i\x00s\x00 \x04\x1f\x04@\x04" + "8\x04" + "2\x04" + "5\x04" + "B!", true}, - } - - check_bom := func(tc test_case) { - It(tc.title, func() { - start := 0 - end := start - bytes := []byte(tc.test) - - for bytes[end] != '!' 
{ - end++ - } - - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_string(&parser, bytes[:end-start]) - result := yaml_parser_update_buffer(&parser, end-start) - Expect(result).To(Equal(tc.result)) - yaml_parser_delete(&parser) - }) - } - - for _, test := range boms { - check_bom(test) - } - - }) - - Context("Long UTF8", func() { - It("parses properly", func() { - buffer := make([]byte, 0, 3+LONG*2) - buffer = append(buffer, '\xef', '\xbb', '\xbf') - for j := 0; j < LONG; j++ { - if j%2 == 1 { - buffer = append(buffer, '\xd0', '\x90') - } else { - buffer = append(buffer, '\xd0', '\xaf') - } - } - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_string(&parser, buffer) - - for k := 0; k < LONG; k++ { - if parser.unread == 0 { - updated := yaml_parser_update_buffer(&parser, 1) - Expect(updated).To(BeTrue()) - // printf("\treader error: %s at %d\n", parser.problem, parser.problem_offset); - } - Expect(parser.unread).NotTo(Equal(0)) - // printf("\tnot enough characters at %d\n", k); - var ch0, ch1 byte - if k%2 == 1 { - ch0 = '\xd0' - ch1 = '\x90' - } else { - ch0 = '\xd0' - ch1 = '\xaf' - } - Expect(parser.buffer[parser.buffer_pos]).To(Equal(ch0)) - Expect(parser.buffer[parser.buffer_pos+1]).To(Equal(ch1)) - // printf("\tincorrect UTF-8 sequence: %X %X instead of %X %X\n", - // (int)parser.buffer.pointer[0], (int)parser.buffer.pointer[1], - // (int)ch0, (int)ch1); - - parser.buffer_pos += 2 - parser.unread -= 1 - } - updated := yaml_parser_update_buffer(&parser, 1) - Expect(updated).To(BeTrue()) - // printf("\treader error: %s at %d\n", parser.problem, parser.problem_offset); - yaml_parser_delete(&parser) - }) - }) - - Context("Long UTF16", func() { - It("parses properly", func() { - buffer := make([]byte, 0, 2+LONG*2) - buffer = append(buffer, '\xff', '\xfe') - for j := 0; j < LONG; j++ { - if j%2 == 1 { - buffer = append(buffer, '\x10', '\x04') - } else { - buffer = append(buffer, '/', '\x04') - } - } - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_string(&parser, buffer) - - for k := 0; k < LONG; k++ { - if parser.unread == 0 { - updated := yaml_parser_update_buffer(&parser, 1) - Expect(updated).To(BeTrue()) - // printf("\treader error: %s at %d\n", parser.problem, parser.problem_offset); - } - Expect(parser.unread).NotTo(Equal(0)) - // printf("\tnot enough characters at %d\n", k); - var ch0, ch1 byte - if k%2 == 1 { - ch0 = '\xd0' - ch1 = '\x90' - } else { - ch0 = '\xd0' - ch1 = '\xaf' - } - Expect(parser.buffer[parser.buffer_pos]).To(Equal(ch0)) - Expect(parser.buffer[parser.buffer_pos+1]).To(Equal(ch1)) - // printf("\tincorrect UTF-8 sequence: %X %X instead of %X %X\n", - // (int)parser.buffer.pointer[0], (int)parser.buffer.pointer[1], - // (int)ch0, (int)ch1); - - parser.buffer_pos += 2 - parser.unread -= 1 - } - updated := yaml_parser_update_buffer(&parser, 1) - Expect(updated).To(BeTrue()) - // printf("\treader error: %s at %d\n", parser.problem, parser.problem_offset); - yaml_parser_delete(&parser) - }) - }) -}) diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver_test.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver_test.go deleted file mode 100644 index 88e61fd0..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver_test.go +++ /dev/null @@ -1,665 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "math" - "reflect" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("Resolver", func() { - var event yaml_event_t - - var nulls = []string{"~", "null", "Null", "NULL"} - - BeforeEach(func() { - event = yaml_event_t{} - }) - - Context("Resolve", func() { - Context("Implicit events", func() { - checkNulls := func(f func()) { - for _, null := range nulls { - event = yaml_event_t{implicit: true} - event.value = []byte(null) - f() - } - } - - BeforeEach(func() { - event.implicit = true - }) - - Context("String", func() { - It("resolves a string", func() { - aString := "" - v := reflect.ValueOf(&aString) - event.value = []byte("abc") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_STR_TAG)) - Expect(aString).To(Equal("abc")) - }) - - It("resolves the empty string", func() { - aString := "abc" - v := reflect.ValueOf(&aString) - event.value = []byte("") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_STR_TAG)) - Expect(aString).To(Equal("")) - - }) - - It("resolves null", func() { - checkNulls(func() { - aString := "abc" - v := reflect.ValueOf(&aString) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(aString).To(Equal("")) - }) - }) - - It("resolves null pointers", func() { - checkNulls(func() { - aString := "abc" - pString := &aString - v := reflect.ValueOf(&pString) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(pString).To(BeNil()) - }) - }) - - }) - - Context("Booleans", func() { - match_bool := func(val string, expected bool) { - b := !expected - - v := reflect.ValueOf(&b) - event.value = []byte(val) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_BOOL_TAG)) - Expect(b).To(Equal(expected)) - } - - It("resolves on", func() { - match_bool("on", true) - match_bool("ON", true) - }) - - It("resolves off", func() { - match_bool("off", false) - match_bool("OFF", false) - }) - - It("resolves true", func() { - match_bool("true", true) - match_bool("TRUE", true) - }) - - It("resolves false", func() { - match_bool("false", false) - match_bool("FALSE", false) - }) - - It("resolves yes", func() { - match_bool("yes", true) - match_bool("YES", true) - }) - - It("resolves no", func() { - match_bool("no", false) - match_bool("NO", false) - }) - - It("reports an error otherwise", func() { - b := true - v := reflect.ValueOf(&b) - event.value = []byte("fail") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Invalid boolean: 'fail' at line 0, column 0")) - }) - - It("resolves null", func() { - checkNulls(func() { - b := true - v := reflect.ValueOf(&b) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(b).To(BeFalse()) - }) - }) - - 
It("resolves null pointers", func() { - checkNulls(func() { - b := true - pb := &b - v := reflect.ValueOf(&pb) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(pb).To(BeNil()) - }) - }) - }) - - Context("Ints", func() { - It("simple ints", func() { - i := 0 - v := reflect.ValueOf(&i) - event.value = []byte("1234") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(1234)) - }) - - It("positive ints", func() { - i := int16(0) - v := reflect.ValueOf(&i) - event.value = []byte("+678") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(int16(678))) - }) - - It("negative ints", func() { - i := int32(0) - v := reflect.ValueOf(&i) - event.value = []byte("-2345") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(int32(-2345))) - }) - - It("base 8", func() { - i := 0 - v := reflect.ValueOf(&i) - event.value = []byte("0o12") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(10)) - }) - - It("base 16", func() { - i := 0 - v := reflect.ValueOf(&i) - event.value = []byte("0xff") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(255)) - }) - - It("fails on overflow", func() { - i := int8(0) - v := reflect.ValueOf(&i) - event.value = []byte("2345") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Invalid integer: '2345' at line 0, column 0")) - }) - - It("fails on invalid int", func() { - i := 0 - v := reflect.ValueOf(&i) - event.value = []byte("234f") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Invalid integer: '234f' at line 0, column 0")) - }) - - It("resolves null", func() { - checkNulls(func() { - i := 1 - v := reflect.ValueOf(&i) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(i).To(Equal(0)) - }) - }) - - It("resolves null pointers", func() { - checkNulls(func() { - i := 1 - pi := &i - v := reflect.ValueOf(&pi) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(pi).To(BeNil()) - }) - }) - - It("returns a Number", func() { - var i Number - v := reflect.ValueOf(&i) - - tag, err := resolve_int("12345", v.Elem(), true, event) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(Number("12345"))) - Expect(i.Int64()).To(Equal(int64(12345))) - - event.value = []byte("1234") - tag, err = resolve(event, v.Elem(), true) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(Number("1234"))) - }) - }) - - Context("UInts", func() { - It("resolves simple uints", func() { - i := uint(0) - v := reflect.ValueOf(&i) - event.value = []byte("1234") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(uint(1234))) - }) - - It("resolves positive uints", func() { - i := uint16(0) - v := reflect.ValueOf(&i) - event.value = 
[]byte("+678") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(uint16(678))) - }) - - It("base 8", func() { - i := uint(0) - v := reflect.ValueOf(&i) - event.value = []byte("0o12") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(uint(10))) - }) - - It("base 16", func() { - i := uint(0) - v := reflect.ValueOf(&i) - event.value = []byte("0xff") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(uint(255))) - }) - - It("fails with negative ints", func() { - i := uint(0) - v := reflect.ValueOf(&i) - event.value = []byte("-2345") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Unsigned int with negative value: '-2345' at line 0, column 0")) - }) - - It("fails on overflow", func() { - i := uint8(0) - v := reflect.ValueOf(&i) - event.value = []byte("2345") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Invalid unsigned integer: '2345' at line 0, column 0")) - }) - - It("resolves null", func() { - checkNulls(func() { - i := uint(1) - v := reflect.ValueOf(&i) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(i).To(Equal(uint(0))) - }) - }) - - It("resolves null pointers", func() { - checkNulls(func() { - i := uint(1) - pi := &i - v := reflect.ValueOf(&pi) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(pi).To(BeNil()) - }) - }) - - It("returns a Number", func() { - var i Number - v := reflect.ValueOf(&i) - - tag, err := resolve_uint("12345", v.Elem(), true, event) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(Number("12345"))) - - event.value = []byte("1234") - tag, err = resolve(event, v.Elem(), true) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_INT_TAG)) - Expect(i).To(Equal(Number("1234"))) - }) - }) - - Context("Floats", func() { - It("float32", func() { - f := float32(0) - v := reflect.ValueOf(&f) - event.value = []byte("2345.01") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_FLOAT_TAG)) - Expect(f).To(Equal(float32(2345.01))) - }) - - It("float64", func() { - f := float64(0) - v := reflect.ValueOf(&f) - event.value = []byte("-456456.01") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_FLOAT_TAG)) - Expect(f).To(Equal(float64(-456456.01))) - }) - - It("+inf", func() { - f := float64(0) - v := reflect.ValueOf(&f) - event.value = []byte("+.inf") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_FLOAT_TAG)) - Expect(f).To(Equal(math.Inf(1))) - }) - - It("-inf", func() { - f := float32(0) - v := reflect.ValueOf(&f) - event.value = []byte("-.inf") - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_FLOAT_TAG)) - Expect(f).To(Equal(float32(math.Inf(-1)))) - }) - - It("nan", func() { - f := float64(0) - v := reflect.ValueOf(&f) - event.value = []byte(".NaN") - - tag, err := resolve(event, v.Elem(), false) - 
Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_FLOAT_TAG)) - Expect(math.IsNaN(f)).To(BeTrue()) - }) - - It("fails on overflow", func() { - i := float32(0) - v := reflect.ValueOf(&i) - event.value = []byte("123e10000") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Invalid float: '123e10000' at line 0, column 0")) - }) - - It("fails on invalid float", func() { - i := float32(0) - v := reflect.ValueOf(&i) - event.value = []byte("123e1a") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Invalid float: '123e1a' at line 0, column 0")) - }) - - It("resolves null", func() { - checkNulls(func() { - f := float64(1) - v := reflect.ValueOf(&f) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(f).To(Equal(0.0)) - }) - }) - - It("resolves null pointers", func() { - checkNulls(func() { - f := float64(1) - pf := &f - v := reflect.ValueOf(&pf) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(pf).To(BeNil()) - }) - }) - - It("returns a Number", func() { - var i Number - v := reflect.ValueOf(&i) - - tag, err := resolve_float("12.345", v.Elem(), true, event) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_FLOAT_TAG)) - Expect(i).To(Equal(Number("12.345"))) - Expect(i.Float64()).To(Equal(12.345)) - - event.value = []byte("1.234") - tag, err = resolve(event, v.Elem(), true) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_FLOAT_TAG)) - Expect(i).To(Equal(Number("1.234"))) - }) - }) - - Context("Timestamps", func() { - parse_date := func(val string, date time.Time) { - d := time.Now() - v := reflect.ValueOf(&d) - event.value = []byte(val) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal("")) - Expect(d).To(Equal(date)) - } - - It("date", func() { - parse_date("2002-12-14", time.Date(2002, time.December, 14, 0, 0, 0, 0, time.UTC)) - }) - - It("canonical", func() { - parse_date("2001-12-15T02:59:43.1Z", time.Date(2001, time.December, 15, 2, 59, 43, int(1*time.Millisecond), time.UTC)) - }) - - It("iso8601", func() { - parse_date("2001-12-14t21:59:43.10-05:00", time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600))) - }) - - It("space separated", func() { - parse_date("2001-12-14 21:59:43.10 -5", time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600))) - }) - - It("no time zone", func() { - parse_date("2001-12-15 2:59:43.10", time.Date(2001, time.December, 15, 2, 59, 43, int(10*time.Millisecond), time.UTC)) - }) - - It("resolves null", func() { - checkNulls(func() { - d := time.Now() - v := reflect.ValueOf(&d) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(d).To(Equal(time.Time{})) - }) - }) - - It("resolves null pointers", func() { - checkNulls(func() { - d := time.Now() - pd := &d - v := reflect.ValueOf(&pd) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_NULL_TAG)) - Expect(pd).To(BeNil()) - }) - }) - }) - - Context("Binary tag", func() { - It("string", func() { - checkNulls(func() { - event.value = []byte("YWJjZGVmZw==") - event.tag = []byte("!binary") - aString := "" - v := 
reflect.ValueOf(&aString) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_STR_TAG)) - Expect(aString).To(Equal("abcdefg")) - }) - }) - - It("[]byte", func() { - checkNulls(func() { - event.value = []byte("YWJjZGVmZw==") - event.tag = []byte("!binary") - bytes := []byte(nil) - v := reflect.ValueOf(&bytes) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_STR_TAG)) - Expect(bytes).To(Equal([]byte("abcdefg"))) - }) - }) - - It("returns a []byte when provided no hints", func() { - checkNulls(func() { - event.value = []byte("YWJjZGVmZw==") - event.tag = []byte("!binary") - var intf interface{} - v := reflect.ValueOf(&intf) - - tag, err := resolve(event, v.Elem(), false) - Expect(err).NotTo(HaveOccurred()) - Expect(tag).To(Equal(yaml_STR_TAG)) - Expect(intf).To(Equal([]byte("abcdefg"))) - }) - }) - }) - - It("fails to resolve a pointer", func() { - aString := "" - pString := &aString - v := reflect.ValueOf(&pString) - event.value = []byte("abc") - - _, err := resolve(event, v.Elem(), false) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("Unknown resolution for 'abc' using <*string Value> at line 0, column 0")) - }) - }) - - Context("Not an implicit event && no tag", func() { - It("bool returns a string", func() { - event.value = []byte("on") - - tag, result := resolveInterface(event, false) - Expect(result).To(Equal("on")) - Expect(tag).To(Equal("")) - }) - - It("number returns a string", func() { - event.value = []byte("1234") - - tag, result := resolveInterface(event, false) - Expect(result).To(Equal("1234")) - Expect(tag).To(Equal("")) - }) - - It("returns the empty string", func() { - event.value = []byte("") - // event.implicit = true - - tag, result := resolveInterface(event, false) - Expect(result).To(Equal("")) - Expect(tag).To(Equal("")) - }) - }) - }) -}) diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner_test.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner_test.go deleted file mode 100644 index db31e651..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner_test.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io/ioutil" - "os" - "path/filepath" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var scan = func(filename string) { - It("scan "+filename, func() { - file, err := os.Open(filename) - Expect(err).To(BeNil()) - - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_reader(&parser, file) - - failed := false - token := yaml_token_t{} - - for { - if !yaml_parser_scan(&parser, &token) { - failed = true - break - } - - if token.token_type == yaml_STREAM_END_TOKEN { - break - } - } - - file.Close() - - // msg := "SUCCESS" - // if failed { - // msg = "FAILED" - // if parser.error != yaml_NO_ERROR { - // m := parser.problem_mark - // fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n", - // parser.context, parser.problem, m.line, m.column) - // } - // } - Expect(failed).To(BeFalse()) - }) -} - -var scanYamls = func(dirname string) { - fileInfos, err := ioutil.ReadDir(dirname) - if err != nil { - panic(err.Error()) - } - - for _, fileInfo := range fileInfos { - if !fileInfo.IsDir() { - scan(filepath.Join(dirname, fileInfo.Name())) - } - } -} - -var _ = Describe("Scanner", func() { - scanYamls("fixtures/specification") - scanYamls("fixtures/specification/types") -}) diff --git a/vendor/github.com/cloudfoundry/gosigar/.gitignore b/vendor/github.com/cloudfoundry/gosigar/.gitignore new file mode 100644 index 00000000..8000dd9d --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/vendor/github.com/cloudfoundry/gosigar/.travis.yml b/vendor/github.com/cloudfoundry/gosigar/.travis.yml new file mode 100644 index 00000000..2a9c5d0c --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.2 + +install: + - 'go install github.com/onsi/ginkgo/ginkgo' +script: 'ginkgo -r' diff --git a/vendor/github.com/cloudfoundry/gosigar/LICENSE b/vendor/github.com/cloudfoundry/gosigar/LICENSE new file mode 100644 index 00000000..11069edd --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/cloudfoundry/gosigar/NOTICE b/vendor/github.com/cloudfoundry/gosigar/NOTICE new file mode 100644 index 00000000..fda553b5 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/NOTICE @@ -0,0 +1,9 @@ +Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache License, Version 2.0 (the "License"). +You may not use this product except in compliance with the License. + +This product includes a number of subcomponents with +separate copyright notices and license terms. Your use of these +subcomponents is subject to the terms and conditions of the +subcomponent's license, as noted in the LICENSE file. 
\ No newline at end of file diff --git a/vendor/github.com/cloudfoundry/gosigar/README.md b/vendor/github.com/cloudfoundry/gosigar/README.md new file mode 100644 index 00000000..90d51f9b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/README.md @@ -0,0 +1,22 @@ +# Go sigar + +## Overview + +Go sigar is a golang implementation of the +[sigar API](https://github.com/hyperic/sigar). The Go version of +sigar has a very similar interface, but is being written from scratch +in pure go/cgo, rather than cgo bindings for libsigar. + +## Test drive + + $ go get github.com/cloudfoundry/gosigar + $ cd $GOPATH/src/github.com/cloudfoundry/gosigar/examples + $ go run uptime.go + +## Supported platforms + +Currently targeting modern flavors of darwin and linux. + +## License + +Apache 2.0 diff --git a/vendor/github.com/cloudfoundry/gosigar/Vagrantfile b/vendor/github.com/cloudfoundry/gosigar/Vagrantfile new file mode 100644 index 00000000..6fd990c1 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/Vagrantfile @@ -0,0 +1,25 @@ +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "hashicorp/precise64" + config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go" + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar" + config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go" + install_go = <<-BASH + set -e + +if [ ! -d "/usr/local/go" ]; then + cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz + cd /usr/local + tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz + echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc +fi +export GOPATH=/home/vagrant/go +export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin +/usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo +/usr/local/go/bin/go get -u github.com/onsi/gomega; +BASH + config.vm.provision "shell", inline: 'apt-get install -y git-core' + config.vm.provision "shell", inline: install_go +end diff --git a/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go new file mode 100644 index 00000000..0e80aa4b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go @@ -0,0 +1,69 @@ +package sigar + +import ( + "time" +) + +type ConcreteSigar struct{} + +func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) { + // samplesCh is buffered to 1 value to immediately return first CPU sample + samplesCh := make(chan Cpu, 1) + + stopCh := make(chan struct{}) + + go func() { + var cpuUsage Cpu + + // Immediately provide non-delta value. + // samplesCh is buffered to 1 value, so it will not block. 
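+		// Get fills cpuUsage in place, so this first send carries
+		// absolute counters; every later sample sent from the ticker
+		// loop below is a Delta against the previous poll.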
+ cpuUsage.Get() + samplesCh <- cpuUsage + + ticker := time.NewTicker(collectionInterval) + + for { + select { + case <-ticker.C: + previousCpuUsage := cpuUsage + + cpuUsage.Get() + + select { + case samplesCh <- cpuUsage.Delta(previousCpuUsage): + default: + // Include default to avoid channel blocking + } + + case <-stopCh: + return + } + } + }() + + return samplesCh, stopCh +} + +func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) { + l := LoadAverage{} + err := l.Get() + return l, err +} + +func (c *ConcreteSigar) GetMem() (Mem, error) { + m := Mem{} + err := m.Get() + return m, err +} + +func (c *ConcreteSigar) GetSwap() (Swap, error) { + s := Swap{} + err := s.Get() + return s, err +} + +func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) { + f := FileSystemUsage{} + err := f.Get(path) + return f, err +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go new file mode 100644 index 00000000..e3a8c4b9 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go @@ -0,0 +1,467 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "syscall" + "time" + "unsafe" +) + +func (self *LoadAverage) Get() error { + avg := []C.double{0, 0, 0} + + C.getloadavg(&avg[0], C.int(len(avg))) + + self.One = float64(avg[0]) + self.Five = float64(avg[1]) + self.Fifteen = float64(avg[2]) + + return nil +} + +func (self *Uptime) Get() error { + tv := syscall.Timeval32{} + + if err := sysctlbyname("kern.boottime", &tv); err != nil { + return err + } + + self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds() + + return nil +} + +func (self *Mem) Get() error { + var vmstat C.vm_statistics_data_t + + if err := sysctlbyname("hw.memsize", &self.Total); err != nil { + return err + } + + if err := vm_info(&vmstat); err != nil { + return err + } + + kern := uint64(vmstat.inactive_count) << 12 + self.Free = uint64(vmstat.free_count) << 12 + + self.Used = self.Total - self.Free + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +type xsw_usage struct { + Total, Avail, Used uint64 +} + +func (self *Swap) Get() error { + sw_usage := xsw_usage{} + + if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil { + return err + } + + self.Total = sw_usage.Total + self.Used = sw_usage.Used + self.Free = sw_usage.Avail + + return nil +} + +func (self *Cpu) Get() error { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpuload C.host_cpu_load_info_data_t + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics error=%d", status) + } + + self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER]) + self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) + self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) + self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) + + return nil +} + +func (self *CpuList) Get() error { + var count C.mach_msg_type_number_t + var cpuload *C.processor_cpu_load_info_data_t + var ncpu C.natural_t + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) 
+ + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) + + bbuf := bytes.NewBuffer(buf) + + self.List = make([]Cpu, 0, ncpu) + + for i := 0; i < int(ncpu); i++ { + cpu := Cpu{} + + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return err + } + + cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) + cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) + cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) + cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) + + self.List = append(self.List, cpu) + } + + return nil +} + +func (self *FileSystemList) Get() error { + num, err := getfsstat(nil, C.MNT_NOWAIT) + if num < 0 { + return err + } + + buf := make([]syscall.Statfs_t, num) + + num, err = getfsstat(buf, C.MNT_NOWAIT) + if err != nil { + return err + } + + fslist := make([]FileSystem, 0, num) + + for i := 0; i < num; i++ { + fs := FileSystem{} + + fs.DirName = bytePtrToString(&buf[i].Mntonname[0]) + fs.DevName = bytePtrToString(&buf[i].Mntfromname[0]) + fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0]) + + fslist = append(fslist, fs) + } + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0) + if n <= 0 { + return syscall.EINVAL + } + buf := make([]byte, n) + n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n) + if n <= 0 { + return syscall.ENOMEM + } + + var pid int32 + num := int(n) / binary.Size(pid) + list := make([]int, 0, num) + bbuf := bytes.NewBuffer(buf) + + for i := 0; i < num; i++ { + if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil { + return err + } + if pid == 0 { + continue + } + + list = append(list, int(pid)) + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Name = C.GoString(&info.pbsd.pbi_comm[0]) + + switch info.pbsd.pbi_status { + case C.SIDL: + self.State = RunStateIdle + case C.SRUN: + self.State = RunStateRun + case C.SSLEEP: + self.State = RunStateSleep + case C.SSTOP: + self.State = RunStateStop + case C.SZOMB: + self.State = RunStateZombie + default: + self.State = RunStateUnknown + } + + self.Ppid = int(info.pbsd.pbi_ppid) + + self.Tty = int(info.pbsd.e_tdev) + + self.Priority = int(info.ptinfo.pti_priority) + + self.Nice = int(info.pbsd.pbi_nice) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Size = uint64(info.ptinfo.pti_virtual_size) + self.Resident = uint64(info.ptinfo.pti_resident_size) + self.PageFaults = uint64(info.ptinfo.pti_faults) + + return nil +} + +func (self *ProcTime) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.User = + 
uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond) + + self.Sys = + uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond) + + self.Total = self.User + self.Sys + + self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) + + (uint64(info.pbsd.pbi_start_tvusec) / 1000) + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + var args []string + + argv := func(arg string) { + args = append(args, arg) + } + + err := kern_procargs(pid, nil, argv, nil) + + self.List = args + + return err +} + +func (self *ProcExe) Get(pid int) error { + exe := func(arg string) { + self.Name = arg + } + + return kern_procargs(pid, exe, nil, nil) +} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, + exe func(string), + argv func(string), + env func(string, string)) error { + + mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} + argmax := uintptr(C.ARG_MAX) + buf := make([]byte, argmax) + err := sysctl(mib, &buf[0], &argmax, nil, 0) + if err != nil { + return nil + } + + bbuf := bytes.NewBuffer(buf) + bbuf.Truncate(int(argmax)) + + var argc int32 + binary.Read(bbuf, binary.LittleEndian, &argc) + + path, err := bbuf.ReadBytes(0) + if exe != nil { + exe(string(chop(path))) + } + + // skip trailing \0's + for { + c, _ := bbuf.ReadByte() + if c != 0 { + bbuf.UnreadByte() + break // start of argv[0] + } + } + + for i := 0; i < int(argc); i++ { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + if argv != nil { + argv(string(chop(arg))) + } + } + + if env == nil { + return nil + } + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + pair := bytes.SplitN(chop(line), delim, 2) + env(string(pair[0]), string(pair[1])) + } + + return nil +} + +// XXX copied from zsyscall_darwin_amd64.go +func sysctl(mib []C.int, old *byte, oldlen *uintptr, + new *byte, newlen uintptr) (err error) { + var p0 unsafe.Pointer + p0 = unsafe.Pointer(&mib[0]) + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = e1 + } + return +} + +func vm_info(vmstat *C.vm_statistics_data_t) error { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT + + status := C.host_statistics( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics=%d", status) + } + + return nil +} + +// generic Sysctl buffer unmarshalling +func sysctlbyname(name string, data interface{}) (err error) { + val, err := syscall.Sysctl(name) + if err != nil { + return err + } + + buf := []byte(val) + + switch v := data.(type) { + case *uint64: + *v = *(*uint64)(unsafe.Pointer(&buf[0])) + return + } + + bbuf := bytes.NewBuffer([]byte(val)) + return binary.Read(bbuf, binary.LittleEndian, data) +} + +// syscall.Getfsstat() wrapper is broken, roll our own to workaround. 
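+// Passing a nil buf performs the initial size-only query that
+// FileSystemList.Get above uses to learn how many entries to allocate.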
+func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) { + var ptr uintptr + var size uintptr + + if len(buf) > 0 { + ptr = uintptr(unsafe.Pointer(&buf[0])) + size = unsafe.Sizeof(buf[0]) * uintptr(len(buf)) + } else { + ptr = uintptr(0) + size = uintptr(0) + } + + trap := uintptr(syscall.SYS_GETFSSTAT64) + ret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags)) + + n = int(ret) + if errno != 0 { + err = errno + } + + return +} + +func task_info(pid int, info *C.struct_proc_taskallinfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if n != size { + return syscall.ENOMEM + } + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_format.go b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go new file mode 100644 index 00000000..d80a64e8 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go @@ -0,0 +1,126 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "time" +) + +// Go version of apr_strfsize +func FormatSize(size uint64) string { + ord := []string{"K", "M", "G", "T", "P", "E"} + o := 0 + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + + if size < 973 { + fmt.Fprintf(w, "%3d ", size) + w.Flush() + return buf.String() + } + + for { + remain := size & 1023 + size >>= 10 + + if size >= 973 { + o++ + continue + } + + if size < 9 || (size == 9 && remain < 973) { + remain = ((remain * 5) + 256) / 512 + if remain >= 10 { + size++ + remain = 0 + } + + fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o]) + break + } + + if remain >= 512 { + size++ + } + + fmt.Fprintf(w, "%3d%s", size, ord[o]) + break + } + + w.Flush() + return buf.String() +} + +func FormatPercent(percent float64) string { + return strconv.FormatFloat(percent, 'f', -1, 64) + "%" +} + +func (self *FileSystemUsage) UsePercent() float64 { + b_used := (self.Total - self.Free) / 1024 + b_avail := self.Avail / 1024 + utotal := b_used + b_avail + used := b_used + + if utotal != 0 { + u100 := used * 100 + pct := u100 / utotal + if u100%utotal != 0 { + pct += 1 + } + return (float64(pct) / float64(100)) * 100.0 + } + + return 0.0 +} + +func (self *Uptime) Format() string { + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + uptime := uint64(self.Length) + + days := uptime / (60 * 60 * 24) + + if days != 0 { + s := "" + if days > 1 { + s = "s" + } + fmt.Fprintf(w, "%d day%s, ", days, s) + } + + minutes := uptime / 60 + hours := minutes / 60 + hours %= 24 + minutes %= 60 + + fmt.Fprintf(w, "%2d:%02d", hours, minutes) + + w.Flush() + return buf.String() +} + +func (self *ProcTime) FormatStartTime() string { + if self.StartTime == 0 { + return "00:00" + } + start := time.Unix(int64(self.StartTime)/1000, 0) + format := "Jan02" + if time.Since(start).Seconds() < (60 * 60 * 24) { + format = "15:04" + } + return start.Format(format) +} + +func (self *ProcTime) FormatTotal() string { + t := self.Total / 1000 + ss := t % 60 + t /= 60 + mm := t % 60 + t /= 60 + hh := t % 24 + return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss) +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go new file mode 100644 index 00000000..dd72a76b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go @@ -0,0 +1,141 @@ +package sigar + +import ( + "time" +) + +type Sigar interface { + CollectCpuStats(collectionInterval time.Duration) 
(<-chan Cpu, chan<- struct{}) + GetLoadAverage() (LoadAverage, error) + GetMem() (Mem, error) + GetSwap() (Swap, error) + GetFileSystemUsage(string) (FileSystemUsage, error) +} + +type Cpu struct { + User uint64 + Nice uint64 + Sys uint64 + Idle uint64 + Wait uint64 + Irq uint64 + SoftIrq uint64 + Stolen uint64 +} + +func (cpu *Cpu) Total() uint64 { + return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle + + cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen +} + +func (cpu Cpu) Delta(other Cpu) Cpu { + return Cpu{ + User: cpu.User - other.User, + Nice: cpu.Nice - other.Nice, + Sys: cpu.Sys - other.Sys, + Idle: cpu.Idle - other.Idle, + Wait: cpu.Wait - other.Wait, + Irq: cpu.Irq - other.Irq, + SoftIrq: cpu.SoftIrq - other.SoftIrq, + Stolen: cpu.Stolen - other.Stolen, + } +} + +type LoadAverage struct { + One, Five, Fifteen float64 +} + +type Uptime struct { + Length float64 +} + +type Mem struct { + Total uint64 + Used uint64 + Free uint64 + ActualFree uint64 + ActualUsed uint64 +} + +type Swap struct { + Total uint64 + Used uint64 + Free uint64 +} + +type CpuList struct { + List []Cpu +} + +type FileSystem struct { + DirName string + DevName string + TypeName string + SysTypeName string + Options string + Flags uint32 +} + +type FileSystemList struct { + List []FileSystem +} + +type FileSystemUsage struct { + Total uint64 + Used uint64 + Free uint64 + Avail uint64 + Files uint64 + FreeFiles uint64 +} + +type ProcList struct { + List []int +} + +type RunState byte + +const ( + RunStateSleep = 'S' + RunStateRun = 'R' + RunStateStop = 'T' + RunStateZombie = 'Z' + RunStateIdle = 'D' + RunStateUnknown = '?' +) + +type ProcState struct { + Name string + State RunState + Ppid int + Tty int + Priority int + Nice int + Processor int +} + +type ProcMem struct { + Size uint64 + Resident uint64 + Share uint64 + MinorFaults uint64 + MajorFaults uint64 + PageFaults uint64 +} + +type ProcTime struct { + StartTime uint64 + User uint64 + Sys uint64 + Total uint64 +} + +type ProcArgs struct { + List []string +} + +type ProcExe struct { + Name string + Cwd string + Root string +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go new file mode 100644 index 00000000..68ffb0f9 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012 VMware, Inc. 
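+//
+// Linux implementation: metrics are parsed from /proc (and /etc/mtab for the
+// file-system list) rather than collected via cgo.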
+ +package sigar + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "syscall" +) + +var system struct { + ticks uint64 + btime uint64 +} + +var Procd string + +func init() { + system.ticks = 100 // C.sysconf(C._SC_CLK_TCK) + + Procd = "/proc" + + // grab system boot time + readFile(Procd+"/stat", func(line string) bool { + if strings.HasPrefix(line, "btime") { + system.btime, _ = strtoull(line[6:]) + return false // stop reading + } + return true + }) +} + +func (self *LoadAverage) Get() error { + line, err := ioutil.ReadFile(Procd + "/loadavg") + if err != nil { + return nil + } + + fields := strings.Fields(string(line)) + + self.One, _ = strconv.ParseFloat(fields[0], 64) + self.Five, _ = strconv.ParseFloat(fields[1], 64) + self.Fifteen, _ = strconv.ParseFloat(fields[2], 64) + + return nil +} + +func (self *Uptime) Get() error { + sysinfo := syscall.Sysinfo_t{} + + if err := syscall.Sysinfo(&sysinfo); err != nil { + return err + } + + self.Length = float64(sysinfo.Uptime) + + return nil +} + +func (self *Mem) Get() error { + var buffers, cached uint64 + table := map[string]*uint64{ + "MemTotal": &self.Total, + "MemFree": &self.Free, + "Buffers": &buffers, + "Cached": &cached, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + kern := buffers + cached + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +func (self *Swap) Get() error { + table := map[string]*uint64{ + "SwapTotal": &self.Total, + "SwapFree": &self.Free, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + return nil +} + +func (self *Cpu) Get() error { + return readFile(Procd+"/stat", func(line string) bool { + if len(line) > 4 && line[0:4] == "cpu " { + parseCpuStat(self, line) + return false + } + return true + + }) +} + +func (self *CpuList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 4 + } + list := make([]Cpu, 0, capacity) + + err := readFile(Procd+"/stat", func(line string) bool { + if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' { + cpu := Cpu{} + parseCpuStat(&cpu, line) + list = append(list, cpu) + } + return true + }) + + self.List = list + + return err +} + +func (self *FileSystemList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 10 + } + fslist := make([]FileSystem, 0, capacity) + + err := readFile("/etc/mtab", func(line string) bool { + fields := strings.Fields(line) + + fs := FileSystem{} + fs.DevName = fields[0] + fs.DirName = fields[1] + fs.SysTypeName = fields[2] + fs.Options = fields[3] + + fslist = append(fslist, fs) + + return true + }) + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + dir, err := os.Open(Procd) + if err != nil { + return err + } + defer dir.Close() + + const readAllDirnames = -1 // see os.File.Readdirnames doc + + names, err := dir.Readdirnames(readAllDirnames) + if err != nil { + return err + } + + capacity := len(names) + list := make([]int, 0, capacity) + + for _, name := range names { + if name[0] < '0' || name[0] > '9' { + continue + } + pid, err := strconv.Atoi(name) + if err == nil { + list = append(list, pid) + } + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + self.Name = fields[1][1 : len(fields[1])-1] // strip ()'s 
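+	// The remaining fields follow the /proc/[pid]/stat layout documented in
+	// proc(5): state, ppid, tty_nr, priority, nice and processor, by index.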
+ + self.State = RunState(fields[2][0]) + + self.Ppid, _ = strconv.Atoi(fields[3]) + + self.Tty, _ = strconv.Atoi(fields[6]) + + self.Priority, _ = strconv.Atoi(fields[17]) + + self.Nice, _ = strconv.Atoi(fields[18]) + + self.Processor, _ = strconv.Atoi(fields[38]) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + contents, err := readProcFile(pid, "statm") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + size, _ := strtoull(fields[0]) + self.Size = size << 12 + + rss, _ := strtoull(fields[1]) + self.Resident = rss << 12 + + share, _ := strtoull(fields[2]) + self.Share = share << 12 + + contents, err = readProcFile(pid, "stat") + if err != nil { + return err + } + + fields = strings.Fields(string(contents)) + + self.MinorFaults, _ = strtoull(fields[10]) + self.MajorFaults, _ = strtoull(fields[12]) + self.PageFaults = self.MinorFaults + self.MajorFaults + + return nil +} + +func (self *ProcTime) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + user, _ := strtoull(fields[13]) + sys, _ := strtoull(fields[14]) + // convert to millis + self.User = user * (1000 / system.ticks) + self.Sys = sys * (1000 / system.ticks) + self.Total = self.User + self.Sys + + // convert to millis + self.StartTime, _ = strtoull(fields[21]) + self.StartTime /= system.ticks + self.StartTime += system.btime + self.StartTime *= 1000 + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + contents, err := readProcFile(pid, "cmdline") + if err != nil { + return err + } + + bbuf := bytes.NewBuffer(contents) + + var args []string + + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + args = append(args, string(chop(arg))) + } + + self.List = args + + return nil +} + +func (self *ProcExe) Get(pid int) error { + fields := map[string]*string{ + "exe": &self.Name, + "cwd": &self.Cwd, + "root": &self.Root, + } + + for name, field := range fields { + val, err := os.Readlink(procFileName(pid, name)) + + if err != nil { + return err + } + + *field = val + } + + return nil +} + +func parseMeminfo(table map[string]*uint64) error { + return readFile(Procd+"/meminfo", func(line string) bool { + fields := strings.Split(line, ":") + + if ptr := table[fields[0]]; ptr != nil { + num := strings.TrimLeft(fields[1], " ") + val, err := strtoull(strings.Fields(num)[0]) + if err == nil { + *ptr = val * 1024 + } + } + + return true + }) +} + +func parseCpuStat(self *Cpu, line string) error { + fields := strings.Fields(line) + + self.User, _ = strtoull(fields[1]) + self.Nice, _ = strtoull(fields[2]) + self.Sys, _ = strtoull(fields[3]) + self.Idle, _ = strtoull(fields[4]) + self.Wait, _ = strtoull(fields[5]) + self.Irq, _ = strtoull(fields[6]) + self.SoftIrq, _ = strtoull(fields[7]) + self.Stolen, _ = strtoull(fields[8]) + + return nil +} + +func readFile(file string, handler func(string) bool) error { + contents, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + if !handler(string(line)) { + break + } + } + + return nil +} + +func strtoull(val string) (uint64, error) { + return strconv.ParseUint(val, 10, 64) +} + +func procFileName(pid int, name string) string { + return Procd + "/" + strconv.Itoa(pid) + "/" + name +} + +func readProcFile(pid int, name string) ([]byte, error) { + path := procFileName(pid, name) + 
contents, err := ioutil.ReadFile(path) + + if err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err == syscall.ENOENT { + return nil, syscall.ESRCH + } + } + } + + return contents, err +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go new file mode 100644 index 00000000..39f18784 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go @@ -0,0 +1,26 @@ +// Copyright (c) 2012 VMware, Inc. + +// +build darwin freebsd linux netbsd openbsd + +package sigar + +import "syscall" + +func (self *FileSystemUsage) Get(path string) error { + stat := syscall.Statfs_t{} + err := syscall.Statfs(path, &stat) + if err != nil { + return err + } + + bsize := stat.Bsize / 512 + + self.Total = (uint64(stat.Blocks) * uint64(bsize)) >> 1 + self.Free = (uint64(stat.Bfree) * uint64(bsize)) >> 1 + self.Avail = (uint64(stat.Bavail) * uint64(bsize)) >> 1 + self.Used = self.Total - self.Free + self.Files = stat.Files + self.FreeFiles = stat.Ffree + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_util.go b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go new file mode 100644 index 00000000..a02df941 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go @@ -0,0 +1,22 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "unsafe" +) + +func bytePtrToString(ptr *int8) string { + bytes := (*[10000]byte)(unsafe.Pointer(ptr)) + + n := 0 + for bytes[n] != 0 { + n++ + } + + return string(bytes[0:n]) +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go new file mode 100644 index 00000000..0c779d7c --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012 VMware, Inc. 
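+//
+// Windows implementation: Mem and FileSystemUsage are backed by Win32 calls
+// (GlobalMemoryStatusEx, GetDiskFreeSpaceEx); the remaining collectors are
+// stubs or return notImplemented().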
+
+package sigar
+
+// #include <stdlib.h>
+// #include <windows.h>
+import "C"
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+func init() {
+}
+
+func (self *LoadAverage) Get() error {
+	return nil
+}
+
+func (self *Uptime) Get() error {
+	return nil
+}
+
+func (self *Mem) Get() error {
+	var statex C.MEMORYSTATUSEX
+	statex.dwLength = C.DWORD(unsafe.Sizeof(statex))
+
+	succeeded := C.GlobalMemoryStatusEx(&statex)
+	if succeeded == C.FALSE {
+		lastError := C.GetLastError()
+		return fmt.Errorf("GlobalMemoryStatusEx failed with error: %d", int(lastError))
+	}
+
+	self.Total = uint64(statex.ullTotalPhys)
+	return nil
+}
+
+func (self *Swap) Get() error {
+	return notImplemented()
+}
+
+func (self *Cpu) Get() error {
+	return notImplemented()
+}
+
+func (self *CpuList) Get() error {
+	return notImplemented()
+}
+
+func (self *FileSystemList) Get() error {
+	return notImplemented()
+}
+
+func (self *ProcList) Get() error {
+	return notImplemented()
+}
+
+func (self *ProcState) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcMem) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcTime) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcArgs) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcExe) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *FileSystemUsage) Get(path string) error {
+	var availableBytes C.ULARGE_INTEGER
+	var totalBytes C.ULARGE_INTEGER
+	var totalFreeBytes C.ULARGE_INTEGER
+
+	pathChars := C.CString(path)
+	defer C.free(unsafe.Pointer(pathChars))
+
+	succeeded := C.GetDiskFreeSpaceEx((*C.CHAR)(pathChars), &availableBytes, &totalBytes, &totalFreeBytes)
+	if succeeded == C.FALSE {
+		lastError := C.GetLastError()
+		return fmt.Errorf("GetDiskFreeSpaceEx failed with error: %d", int(lastError))
+	}
+
+	self.Total = *(*uint64)(unsafe.Pointer(&totalBytes))
+	return nil
+}
+
+func notImplemented() error {
+	panic("Not Implemented")
+	return nil
+}
diff --git a/vendor/github.com/codegangsta/cli/.gitignore b/vendor/github.com/codegangsta/cli/.gitignore
new file mode 100644
index 00000000..78237789
--- /dev/null
+++ b/vendor/github.com/codegangsta/cli/.gitignore
@@ -0,0 +1 @@
+*.coverprofile
diff --git a/vendor/github.com/codegangsta/cli/.travis.yml b/vendor/github.com/codegangsta/cli/.travis.yml
index baf46abc..657e96a9 100644
--- a/vendor/github.com/codegangsta/cli/.travis.yml
+++ b/vendor/github.com/codegangsta/cli/.travis.yml
@@ -1,6 +1,32 @@
 language: go
-go: 1.1
+
+sudo: false
+
+go:
+- 1.2.2
+- 1.3.3
+- 1.4
+- 1.5.4
+- 1.6.2
+- master
+
+matrix:
+  allow_failures:
+  - go: master
+  include:
+  - go: 1.6.2
+    os: osx
+  - go: 1.1.2
+    install: go get -v .
+    before_script: echo skipping gfmxr on $TRAVIS_GO_VERSION
+    script:
+    - ./runtests vet
+    - ./runtests test
+
+before_script:
+- go get github.com/urfave/gfmxr/...
 script:
-- go vet ./...
-- go test -v ./...
+- ./runtests vet
+- ./runtests test
+- ./runtests gfmxr
diff --git a/vendor/github.com/codegangsta/cli/CHANGELOG.md b/vendor/github.com/codegangsta/cli/CHANGELOG.md
new file mode 100644
index 00000000..0d51d2a5
--- /dev/null
+++ b/vendor/github.com/codegangsta/cli/CHANGELOG.md
@@ -0,0 +1,317 @@
+# Change Log
+
+**ATTN**: This project uses [semantic versioning](http://semver.org/).
+ +## [Unreleased] +### Added +- `./runtests` test runner with coverage tracking by default +- testing on OS X +- testing on Windows + +### Fixed +- Printing of command aliases in help text + +## [1.17.0] - 2016-05-09 +### Added +- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc` +- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool` +- Support for hiding commands by setting `Hidden: true` -- this will hide the + commands in help output + +### Changed +- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer + quoted in help text output. +- All flag types now include `(default: {value})` strings following usage when a + default value can be (reasonably) detected. +- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent + with non-slice flag types +- Apps now exit with a code of 3 if an unknown subcommand is specified + (previously they printed "No help topic for...", but still exited 0. This + makes it easier to script around apps built using `cli` since they can trust + that a 0 exit code indicated a successful execution. +- cleanups based on [Go Report Card + feedback](https://goreportcard.com/report/github.com/urfave/cli) + +## [1.16.0] - 2016-05-02 +### Added +- `Hidden` field on all flag struct types to omit from generated help text + +### Changed +- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from +generated help text via the `Hidden` field + +### Fixed +- handling of error values in `HandleAction` and `HandleExitCoder` + +## [1.15.0] - 2016-04-30 +### Added +- This file! +- Support for placeholders in flag usage strings +- `App.Metadata` map for arbitrary data/state management +- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after +parsing. +- Support for nested lookup of dot-delimited keys in structures loaded from +YAML. + +### Changed +- The `App.Action` and `Command.Action` now prefer a return signature of +`func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil +`error` is returned, there may be two outcomes: + - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called + automatically + - Else the error is bubbled up and returned from `App.Run` +- Specifying an `Action` with the legacy return signature of +`func(*cli.Context)` will produce a deprecation message to stderr +- Specifying an `Action` that is not a `func` type will produce a non-zero exit +from `App.Run` +- Specifying an `Action` func that has an invalid (input) signature will +produce a non-zero exit from `App.Run` + +### Deprecated +- +`cli.App.RunAndExitOnError`, which should now be done by returning an error +that fulfills `cli.ExitCoder` to `cli.App.Run`. +- the legacy signature for +`cli.App.Action` of `func(*cli.Context)`, which should now have a return +signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. + +### Fixed +- Added missing `*cli.Context.GlobalFloat64` method + +## [1.14.0] - 2016-04-03 (backfilled 2016-04-25) +### Added +- Codebeat badge +- Support for categorization via `CategorizedHelp` and `Categories` on app. + +### Changed +- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`. + +### Fixed +- Ensure version is not shown in help text when `HideVersion` set. + +## [1.13.0] - 2016-03-06 (backfilled 2016-04-25) +### Added +- YAML file input support. +- `NArg` method on context. + +## [1.12.0] - 2016-02-17 (backfilled 2016-04-25) +### Added +- Custom usage error handling. 
+- Custom text support in `USAGE` section of help output. +- Improved help messages for empty strings. +- AppVeyor CI configuration. + +### Changed +- Removed `panic` from default help printer func. +- De-duping and optimizations. + +### Fixed +- Correctly handle `Before`/`After` at command level when no subcommands. +- Case of literal `-` argument causing flag reordering. +- Environment variable hints on Windows. +- Docs updates. + +## [1.11.1] - 2015-12-21 (backfilled 2016-04-25) +### Changed +- Use `path.Base` in `Name` and `HelpName` +- Export `GetName` on flag types. + +### Fixed +- Flag parsing when skipping is enabled. +- Test output cleanup. +- Move completion check to account for empty input case. + +## [1.11.0] - 2015-11-15 (backfilled 2016-04-25) +### Added +- Destination scan support for flags. +- Testing against `tip` in Travis CI config. + +### Changed +- Go version in Travis CI config. + +### Fixed +- Removed redundant tests. +- Use correct example naming in tests. + +## [1.10.2] - 2015-10-29 (backfilled 2016-04-25) +### Fixed +- Remove unused var in bash completion. + +## [1.10.1] - 2015-10-21 (backfilled 2016-04-25) +### Added +- Coverage and reference logos in README. + +### Fixed +- Use specified values in help and version parsing. +- Only display app version and help message once. + +## [1.10.0] - 2015-10-06 (backfilled 2016-04-25) +### Added +- More tests for existing functionality. +- `ArgsUsage` at app and command level for help text flexibility. + +### Fixed +- Honor `HideHelp` and `HideVersion` in `App.Run`. +- Remove juvenile word from README. + +## [1.9.0] - 2015-09-08 (backfilled 2016-04-25) +### Added +- `FullName` on command with accompanying help output update. +- Set default `$PROG` in bash completion. + +### Changed +- Docs formatting. + +### Fixed +- Removed self-referential imports in tests. + +## [1.8.0] - 2015-06-30 (backfilled 2016-04-25) +### Added +- Support for `Copyright` at app level. +- `Parent` func at context level to walk up context lineage. + +### Fixed +- Global flag processing at top level. + +## [1.7.1] - 2015-06-11 (backfilled 2016-04-25) +### Added +- Aggregate errors from `Before`/`After` funcs. +- Doc comments on flag structs. +- Include non-global flags when checking version and help. +- Travis CI config updates. + +### Fixed +- Ensure slice type flags have non-nil values. +- Collect global flags from the full command hierarchy. +- Docs prose. + +## [1.7.0] - 2015-05-03 (backfilled 2016-04-25) +### Changed +- `HelpPrinter` signature includes output writer. + +### Fixed +- Specify go 1.1+ in docs. +- Set `Writer` when running command as app. + +## [1.6.0] - 2015-03-23 (backfilled 2016-04-25) +### Added +- Multiple author support. +- `NumFlags` at context level. +- `Aliases` at command level. + +### Deprecated +- `ShortName` at command level. + +### Fixed +- Subcommand help output. +- Backward compatible support for deprecated `Author` and `Email` fields. +- Docs regarding `Names`/`Aliases`. + +## [1.5.0] - 2015-02-20 (backfilled 2016-04-25) +### Added +- `After` hook func support at app and command level. + +### Fixed +- Use parsed context when running command as subcommand. +- Docs prose. + +## [1.4.1] - 2015-01-09 (backfilled 2016-04-25) +### Added +- Support for hiding `-h / --help` flags, but not `help` subcommand. +- Stop flag parsing after `--`. + +### Fixed +- Help text for generic flags to specify single value. +- Use double quotes in output for defaults. +- Use `ParseInt` instead of `ParseUint` for int environment var values. 
+- Use `0` as base when parsing int environment var values. + +## [1.4.0] - 2014-12-12 (backfilled 2016-04-25) +### Added +- Support for environment variable lookup "cascade". +- Support for `Stdout` on app for output redirection. + +### Fixed +- Print command help instead of app help in `ShowCommandHelp`. + +## [1.3.1] - 2014-11-13 (backfilled 2016-04-25) +### Added +- Docs and example code updates. + +### Changed +- Default `-v / --version` flag made optional. + +## [1.3.0] - 2014-08-10 (backfilled 2016-04-25) +### Added +- `FlagNames` at context level. +- Exposed `VersionPrinter` var for more control over version output. +- Zsh completion hook. +- `AUTHOR` section in default app help template. +- Contribution guidelines. +- `DurationFlag` type. + +## [1.2.0] - 2014-08-02 +### Added +- Support for environment variable defaults on flags plus tests. + +## [1.1.0] - 2014-07-15 +### Added +- Bash completion. +- Optional hiding of built-in help command. +- Optional skipping of flag parsing at command level. +- `Author`, `Email`, and `Compiled` metadata on app. +- `Before` hook func support at app and command level. +- `CommandNotFound` func support at app level. +- Command reference available on context. +- `GenericFlag` type. +- `Float64Flag` type. +- `BoolTFlag` type. +- `IsSet` flag helper on context. +- More flag lookup funcs at context level. +- More tests & docs. + +### Changed +- Help template updates to account for presence/absence of flags. +- Separated subcommand help template. +- Exposed `HelpPrinter` var for more control over help output. + +## [1.0.0] - 2013-11-01 +### Added +- `help` flag in default app flag set and each command flag set. +- Custom handling of argument parsing errors. +- Command lookup by name at app level. +- `StringSliceFlag` type and supporting `StringSlice` type. +- `IntSliceFlag` type and supporting `IntSlice` type. +- Slice type flag lookups by name at context level. +- Export of app and command help functions. +- More tests & docs. + +## 0.1.0 - 2013-07-22 +### Added +- Initial implementation. 
+ +[Unreleased]: https://github.com/urfave/cli/compare/v1.17.0...HEAD +[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0 +[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0 +[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0 +[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0 +[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0 +[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0 +[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1 +[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0 +[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2 +[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1 +[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0 +[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0 +[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0 +[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1 +[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0 +[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0 +[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0 diff --git a/vendor/github.com/codegangsta/cli/README.md b/vendor/github.com/codegangsta/cli/README.md index 2453c1af..80337c56 100644 --- a/vendor/github.com/codegangsta/cli/README.md +++ b/vendor/github.com/codegangsta/cli/README.md @@ -1,38 +1,99 @@ -[![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli) +[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/meatballhat/cli) +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / +[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) -# cli.go -cli.go is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. -You can view the API docs here: -http://godoc.org/github.com/codegangsta/cli +# cli + +**Notice:** This is the library formerly known as +`github.com/codegangsta/cli` -- Github will automatically redirect requests +to this repository, but we recommend updating your references for clarity. + +cli is a simple, fast, and fun package for building command line apps in Go. 
The goal is to enable developers to write fast and distributable command line applications in an expressive way.

 ## Overview
+
 Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.

-This is where cli.go comes into play. cli.go makes command line programming fun, organized, and expressive!
+**This is where cli comes into play.** cli makes command line programming fun, organized, and expressive!

 ## Installation

-Make sure you have a working Go environment (go 1.1 is *required*). [See the install instructions](http://golang.org/doc/install.html).
-To install cli.go, simply run:
+Make sure you have a working Go environment. Go version 1.1+ is required for
+core cli, whereas use of the [`./altsrc`](./altsrc) input extensions requires Go
+version 1.2+. [See the install
+instructions](http://golang.org/doc/install.html).
+
+To install cli, simply run:
 ```
-$ go get github.com/codegangsta/cli
+$ go get github.com/urfave/cli
 ```

-Make sure your PATH includes to the `$GOPATH/bin` directory so your commands can be easily used:
+Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can be easily used:
 ```
 export PATH=$PATH:$GOPATH/bin
 ```

+### Supported platforms
+
+cli is tested against multiple versions of Go on Linux, and against the latest
+released version of Go on OS X and Windows. For full details, see
+[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml).
+
+### Using the `v2` branch
+
+There is currently a long-lived branch named `v2` that is intended to land as
+the new `master` branch once development there has settled down. The current
+`master` branch (mirrored as `v1`) is being manually merged into `v2` on
+an irregular human-based schedule, but generally if one wants to "upgrade" to
+`v2` *now* and accept the volatility (read: "awesomeness") that comes along with
+that, please use whatever version pinning of your preference, such as via
+`gopkg.in`:
+
+```
+$ go get gopkg.in/urfave/cli.v2
+```
+
+``` go
+...
+import (
+  "gopkg.in/urfave/cli.v2" // imports as package "cli"
+)
+...
+```
+
+### Pinning to the `v1` branch
+
+Similarly to the section above describing use of the `v2` branch, if one wants
+to avoid any unexpected compatibility pains once `v2` becomes `master`, then
+pinning to the `v1` branch is an acceptable option, e.g.:
+
+```
+$ go get gopkg.in/urfave/cli.v1
+```
+
+``` go
+...
+import (
+  "gopkg.in/urfave/cli.v1" // imports as package "cli"
+)
+...
+```
+
 ## Getting Started
+
-One of the philosophies behind cli.go is that an API should be playful and full of discovery. So a cli.go app can be as little as one line of code in `main()`.
+One of the philosophies behind cli is that an API should be playful and full of discovery. So a cli app can be as little as one line of code in `main()`.

 ``` go
 package main

 import (
   "os"
-  "github.com/codegangsta/cli"
+  "github.com/urfave/cli"
 )

 func main() {
@@ -42,22 +103,28 @@ func main() {
 This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation:
+
 ``` go
 package main

 import (
+  "fmt"
   "os"
-  "github.com/codegangsta/cli"
+
+  "github.com/urfave/cli"
 )

 func main() {
   app := cli.NewApp()
   app.Name = "boom"
   app.Usage = "make an explosive entrance"
-  app.Action = func(c *cli.Context) {
-    println("boom!
I say!") + app.Action = func(c *cli.Context) error { + fmt.Println("boom! I say!") + return nil } - + app.Run(os.Args) } ``` @@ -68,23 +135,30 @@ Running this already gives you a ton of functionality, plus support for things l Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness! +Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: + + ``` go -/* greet.go */ package main import ( + "fmt" "os" - "github.com/codegangsta/cli" + + "github.com/urfave/cli" ) func main() { app := cli.NewApp() app.Name = "greet" app.Usage = "fight the loneliness!" - app.Action = func(c *cli.Context) { - println("Hello friend!") + app.Action = func(c *cli.Context) error { + fmt.Println("Hello friend!") + return nil } - + app.Run(os.Args) } ``` @@ -102,7 +176,8 @@ $ greet Hello friend! ``` -cli.go also generates some bitchass help text: +cli also generates neat help text: + ``` $ greet help NAME: @@ -122,18 +197,22 @@ GLOBAL OPTIONS ``` ### Arguments -You can lookup arguments by calling the `Args` function on cli.Context. + +You can lookup arguments by calling the `Args` function on `cli.Context`. ``` go ... -app.Action = func(c *cli.Context) { - println("Hello", c.Args()[0]) +app.Action = func(c *cli.Context) error { + fmt.Println("Hello", c.Args()[0]) + return nil } ... ``` ### Flags + Setting and querying flags is simple. + ``` go ... app.Flags = []cli.Flag { @@ -143,23 +222,76 @@ app.Flags = []cli.Flag { Usage: "language for the greeting", }, } -app.Action = func(c *cli.Context) { +app.Action = func(c *cli.Context) error { name := "someone" - if len(c.Args()) > 0 { + if c.NArg() > 0 { name = c.Args()[0] } if c.String("lang") == "spanish" { - println("Hola", name) + fmt.Println("Hola", name) } else { - println("Hello", name) + fmt.Println("Hello", name) } + return nil } ... ``` +You can also set a destination variable for a flag, to which the content will be scanned. + +``` go +... +var language string +app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + Destination: &language, + }, +} +app.Action = func(c *cli.Context) error { + name := "someone" + if c.NArg() > 0 { + name = c.Args()[0] + } + if language == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil +} +... +``` + +See full list of flags at http://godoc.org/github.com/urfave/cli + +#### Placeholder Values + +Sometimes it's useful to specify a flag's value within the usage string itself. Such placeholders are +indicated with back quotes. + +For example this: + +```go +cli.StringFlag{ + Name: "config, c", + Usage: "Load configuration from `FILE`", +} +``` + +Will result in help output like: + +``` +--config FILE, -c FILE Load configuration from FILE +``` + +Note that only the first placeholder is used. Subsequent back-quoted words will be left as-is. + #### Alternate Names -You can set alternate (or short) names for flags by providing a comma-delimited list for the Name. e.g. +You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. ``` go app.Flags = []cli.Flag { @@ -171,9 +303,11 @@ app.Flags = []cli.Flag { } ``` +That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. 
+
+#### Values from the Environment
+
-You can also have the default value set from the environment via EnvVar. e.g.
+You can also have the default value set from the environment via `EnvVar`. e.g.

 ``` go
 app.Flags = []cli.Flag {
   cli.StringFlag{
     Name: "lang, l",
     Value: "english",
     Usage: "language for the greeting",
     EnvVar: "APP_LANG",
   },
 }
 ```

-That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error.
+The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default.
+
+``` go
+app.Flags = []cli.Flag {
+  cli.StringFlag{
+    Name: "lang, l",
+    Value: "english",
+    Usage: "language for the greeting",
+    EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
+  },
+}
+```
+
+#### Values from alternate input sources (YAML and others)
+
+There is a separate package altsrc that adds support for getting flag values from other input sources like YAML.
+
+To get values for a flag from an alternate input source, wrap an existing cli.Flag as shown below:
+
+``` go
+  altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
+```
+
+Initialization must also occur for these flags. Below is an example that initializes input from a YAML file.
+
+``` go
+  command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
+```
+
+The code above will use the "load" string as a flag name to get the file name of a YAML file from the cli.Context.
+It will then use that file name to initialize the YAML input source for any flags that are defined on that command.
+Note that the "load" flag used here must also be defined on the command's flags for this code snippet to work.
+
+Currently only YAML files are supported, but developers can add support for other input sources by implementing
+altsrc.InputSourceContext for their given sources.
+
+Here is a more complete sample of a command using YAML support:
+
+``` go
+  command := &cli.Command{
+    Name:        "test-cmd",
+    Aliases:     []string{"tc"},
+    Usage:       "this is for testing",
+    Description: "testing",
+    Action: func(c *cli.Context) error {
+      // Action to run
+      return nil
+    },
+    Flags: []cli.Flag{
+      NewIntFlag(cli.IntFlag{Name: "test"}),
+      cli.StringFlag{Name: "load"}},
+  }
+  command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
+  err := command.Run(c)
+```

 ### Subcommands

 Subcommands can be defined for a more git-like command line app.
+
 ```go
 ...
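 // Each Command below may declare Aliases and nested Subcommands; the Action
 // func receives the parsed *cli.Context.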
app.Commands = []cli.Command{ { Name: "add", - ShortName: "a", + Aliases: []string{"a"}, Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("added task: ", c.Args().First()) + return nil }, }, { Name: "complete", - ShortName: "c", + Aliases: []string{"c"}, Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil }, }, { Name: "template", - ShortName: "r", + Aliases: []string{"r"}, Usage: "options for task templates", Subcommands: []cli.Command{ { Name: "add", Usage: "add a new template", - Action: func(c *cli.Context) { - println("new task template: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("new task template: ", c.Args().First()) + return nil }, }, { Name: "remove", Usage: "remove an existing template", - Action: func(c *cli.Context) { - println("removed task template: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("removed task template: ", c.Args().First()) + return nil }, }, }, - }, + }, } ... ``` +### Subcommands categories + +For additional organization in apps that have many subcommands, you can +associate a category for each command to group them together in the help +output. + +E.g. + +```go +... + app.Commands = []cli.Command{ + { + Name: "noop", + }, + { + Name: "add", + Category: "template", + }, + { + Name: "remove", + Category: "template", + }, + } +... +``` + +Will include: + +``` +... +COMMANDS: + noop + + Template actions: + add + remove +... +``` + +### Exit code + +Calling `App.Run` will not automatically call `os.Exit`, which means that by +default the exit code will "fall through" to being `0`. An explicit exit code +may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a +`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.BoolTFlag{ + Name: "ginger-crouton", + Usage: "is it in the soup?", + }, + } + app.Action = func(ctx *cli.Context) error { + if !ctx.Bool("ginger-crouton") { + return cli.NewExitError("it is not in the soup", 86) + } + return nil + } + + app.Run(os.Args) +} +``` + ### Bash Completion -You can enable completion commands by setting the EnableBashCompletion -flag on the App object. By default, this setting will only auto-complete to +You can enable completion commands by setting the `EnableBashCompletion` +flag on the `App` object. By default, this setting will only auto-complete to show an app's subcommands, but you can write your own completion methods for the App or its subcommands. + ```go ... 
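 // tasks is the static list of words offered to the shell when completing
 // the "complete" command's arguments.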
var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"} @@ -248,19 +516,20 @@ app := cli.NewApp() app.EnableBashCompletion = true app.Commands = []cli.Command{ { - Name: "complete", - ShortName: "c", + Name: "complete", + Aliases: []string{"c"}, Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil }, BashComplete: func(c *cli.Context) { // This will complete if no args are passed - if len(c.Args()) > 0 { + if c.NArg() > 0 { return } for _, t := range tasks { - println(t) + fmt.Println(t) } }, } @@ -270,11 +539,105 @@ app.Commands = []cli.Command{ #### To Enable -Source the autocomplete/bash_autocomplete file in your .bashrc file while -setting the PROG variable to the name of your program: +Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while +setting the `PROG` variable to the name of your program: `PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` +#### To Distribute -## About -cli.go is written by none other than the [Code Gangsta](http://codegangsta.io) +Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename +it to the name of the program you wish to add autocomplete support for (or +automatically install it there if you are distributing a package). Don't forget +to source the file to make it active in the current shell. + +``` +sudo cp src/bash_autocomplete /etc/bash_completion.d/ +source /etc/bash_completion.d/ +``` + +Alternatively, you can just document that users should source the generic +`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set +to the name of their program (as above). + +### Generated Help Text Customization + +All of the help text generation may be customized, and at multiple levels. The +templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and +`SubcommandHelpTemplate` which may be reassigned or augmented, and full override +is possible by assigning a compatible func to the `cli.HelpPrinter` variable, +e.g.: + + +``` go +package main + +import ( + "fmt" + "io" + "os" + + "github.com/urfave/cli" +) + +func main() { + // EXAMPLE: Append to an existing template + cli.AppHelpTemplate = fmt.Sprintf(`%s + +WEBSITE: http://awesometown.example.com + +SUPPORT: support@awesometown.example.com + +`, cli.AppHelpTemplate) + + // EXAMPLE: Override a template + cli.AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command +[command options]{{end}} {{if +.ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if len .Authors}} +AUTHOR(S): + {{range .Authors}}{{ . }}{{end}} + {{end}}{{if .Commands}} +COMMANDS: +{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t" +}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} +GLOBAL OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright }} +COPYRIGHT: + {{.Copyright}} + {{end}}{{if .Version}} +VERSION: + {{.Version}} + {{end}} +` + + // EXAMPLE: Replace the `HelpPrinter` func + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Println("Ha HA. I pwnd the help!!1") + } + + cli.NewApp().Run(os.Args) +} +``` + +## Contribution Guidelines + +Feel free to put up a pull request to fix a bug or maybe add a feature. 
I will
+give it a code review and make sure that it does not break backwards
+compatibility. If I or any other collaborators agree that it is in line with
+the vision of the project, we will work with you to get the code into
+a mergeable state and merge it into the master branch.
+
+If you have contributed something significant to the project, we will most
+likely add you as a collaborator. As a collaborator you are given the ability
+to merge others' pull requests. It is very important that new code does not
+break existing code, so be careful about what code you do choose to merge.
+
+If you feel like you have contributed to the project but have not yet been
+added as a collaborator, we probably forgot to add you; please open an issue.
diff --git a/vendor/github.com/codegangsta/cli/app.go b/vendor/github.com/codegangsta/cli/app.go
index e193b828..1e33cdc7 100644
--- a/vendor/github.com/codegangsta/cli/app.go
+++ b/vendor/github.com/codegangsta/cli/app.go
@@ -2,18 +2,43 @@ package cli

 import (
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
 	"time"
 )

-// App is the main structure of a cli application. It is recomended that
-// and app be created with the cli.NewApp() function
+var (
+	changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md"
+	appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
+	runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)
+
+	contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."
+
+	errNonFuncAction = NewExitError("ERROR invalid Action type. "+
+		fmt.Sprintf("Must be a func of type `cli.ActionFunc`. %s", contactSysadmin)+
+		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
+	errInvalidActionSignature = NewExitError("ERROR invalid Action signature. "+
+		fmt.Sprintf("Must be `cli.ActionFunc`. %s", contactSysadmin)+
+		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
+)
+
+// App is the main structure of a cli application. It is recommended that
+// an app be created with the cli.NewApp() function
 type App struct {
-	// The name of the program. Defaults to os.Args[0]
+	// The name of the program. Defaults to path.Base(os.Args[0])
 	Name string
+	// Full name of command for help, defaults to Name
+	HelpName string
 	// Description of the program.
 	Usage string
+	// Text to override the USAGE section of help
+	UsageText string
+	// Description of the program argument format.
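+	// (shown in place of the default "[arguments...]" placeholder in help output)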
+ ArgsUsage string // Version of the program Version string // List of commands to execute @@ -24,21 +49,45 @@ type App struct { EnableBashCompletion bool // Boolean to hide built-in help command HideHelp bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // Populate on app startup, only gettable through method Categories() + categories CommandCategories // An action to execute when the bash-completion flag is set - BashComplete func(context *Context) + BashComplete BashCompleteFunc // An action to execute before any subcommands are run, but after the context is ready // If a non-nil error is returned, no subcommands are run - Before func(context *Context) error + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc // The action to execute when no subcommands are specified - Action func(context *Context) + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + // Execute this function if the proper command cannot be found - CommandNotFound func(context *Context, command string) + CommandNotFound CommandNotFoundFunc + // Execute this function if an usage error occurs + OnUsageError OnUsageErrorFunc // Compilation date Compiled time.Time - // Author + // List of all authors who contributed + Authors []Author + // Copyright of the binary if any + Copyright string + // Name of Author (Note: Use App.Authors, this is deprecated) Author string - // Author e-mail + // Email of Author (Note: Use App.Authors, this is deprecated) Email string + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Other custom info + Metadata map[string]interface{} + + didSetup bool } // Tries to find out when this binary was compiled. @@ -51,70 +100,130 @@ func compileTime() time.Time { return info.ModTime() } -// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. func NewApp() *App { return &App{ - Name: os.Args[0], + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), Usage: "A new cli application", + UsageText: "", Version: "0.0.0", BashComplete: DefaultAppComplete, Action: helpCommand.Action, Compiled: compileTime(), - Author: "Author", - Email: "unknown@email", + Writer: os.Stdout, } } -// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination -func (a *App) Run(arguments []string) error { +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. 
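+// In particular, it folds the deprecated Author/Email fields into Authors,
+// fills in per-command HelpName values, and builds the command category index.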
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Author != "" || a.Email != "" { + a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + // append help to commands if a.Command(helpCommand.Name) == nil && !a.HideHelp { a.Commands = append(a.Commands, helpCommand) - a.appendFlag(HelpFlag) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } } //append version/help flags if a.EnableBashCompletion { a.appendFlag(BashCompletionFlag) } - a.appendFlag(VersionFlag) + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() // parse flags set := flagSet(a.Name, a.Flags) set.SetOutput(ioutil.Discard) - err := set.Parse(arguments[1:]) + err = set.Parse(arguments[1:]) nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, nil) if nerr != nil { - fmt.Println(nerr) - context := NewContext(a, set, set) + fmt.Fprintln(a.Writer, nerr) ShowAppHelp(context) - fmt.Println("") return nerr } - context := NewContext(a, set, set) - - if err != nil { - fmt.Printf("Incorrect Usage.\n\n") - ShowAppHelp(context) - fmt.Println("") - return err - } if checkCompletions(context) { return nil } - if checkHelp(context) { + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + ShowAppHelp(context) return nil } - if checkVersion(context) { + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) return nil } + if a.After != nil { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + if a.Before != nil { - err := a.Before(context) - if err != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) + ShowAppHelp(context) + HandleExitCoder(beforeErr) + err = beforeErr return err } } @@ -129,28 +238,42 @@ func (a *App) Run(arguments []string) error { } // Run default Action - a.Action(context) - return nil + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err } -// Another entry point to the cli app, takes care of passing arguments and error handling +// DEPRECATED: Another entry point to the cli app, takes care of passing arguments and error handling func (a *App) RunAndExitOnError() { if err := a.Run(os.Args); err != nil { - os.Stderr.WriteString(fmt.Sprintln(err)) - os.Exit(1) + fmt.Fprintln(a.errWriter(), err) + OsExiter(1) } } -// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags -func (a *App) RunAsSubcommand(ctx *Context) error { +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { // append help to 
commands if len(a.Commands) > 0 { if a.Command(helpCommand.Name) == nil && !a.HideHelp { a.Commands = append(a.Commands, helpCommand) - a.appendFlag(HelpFlag) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } } } + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + // append flags if a.EnableBashCompletion { a.appendFlag(BashCompletionFlag) @@ -159,31 +282,36 @@ func (a *App) RunAsSubcommand(ctx *Context) error { // parse flags set := flagSet(a.Name, a.Flags) set.SetOutput(ioutil.Discard) - err := set.Parse(ctx.Args().Tail()) + err = set.Parse(ctx.Args().Tail()) nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, ctx.globalSet) + context := NewContext(a, set, ctx) if nerr != nil { - fmt.Println(nerr) + fmt.Fprintln(a.Writer, nerr) + fmt.Fprintln(a.Writer) if len(a.Commands) > 0 { ShowSubcommandHelp(context) } else { ShowCommandHelp(ctx, context.Args().First()) } - fmt.Println("") return nerr } - if err != nil { - fmt.Printf("Incorrect Usage.\n\n") - ShowSubcommandHelp(context) - return err - } - if checkCompletions(context) { return nil } + if err != nil { + if a.OnUsageError != nil { + err = a.OnUsageError(context, err, true) + HandleExitCoder(err) + return err + } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowSubcommandHelp(context) + return err + } + if len(a.Commands) > 0 { if checkSubcommandHelp(context) { return nil @@ -194,9 +322,25 @@ func (a *App) RunAsSubcommand(ctx *Context) error { } } + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + HandleExitCoder(err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + if a.Before != nil { - err := a.Before(context) - if err != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + HandleExitCoder(beforeErr) + err = beforeErr return err } } @@ -211,16 +355,13 @@ func (a *App) RunAsSubcommand(ctx *Context) error { } // Run default Action - if len(a.Commands) > 0 { - a.Action(context) - } else { - a.Action(ctx) - } + err = HandleAction(a.Action, context) - return nil + HandleExitCoder(err) + return err } -// Returns the named command on App. Returns nil if the command does not exist +// Command returns the named command on App. 
Returns nil if the command does not exist func (a *App) Command(name string) *Command { for _, c := range a.Commands { if c.HasName(name) { @@ -231,6 +372,46 @@ func (a *App) Command(name string) *Command { return nil } +// Categories returns a slice containing all the categories with the commands they contain +func (a *App) Categories() CommandCategories { + return a.categories +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + ret := []Command{} + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + func (a *App) hasFlag(flag Flag) bool { for _, f := range a.Flags { if flag == f { @@ -241,8 +422,71 @@ func (a *App) hasFlag(flag Flag) bool { return false } +func (a *App) errWriter() io.Writer { + + // When the app ErrWriter is nil use the package level one. + if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + func (a *App) appendFlag(flag Flag) { if !a.hasFlag(flag) { a.Flags = append(a.Flags, flag) } } + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a Author) String() string { + e := "" + if a.Email != "" { + e = "<" + a.Email + "> " + } + + return fmt.Sprintf("%v %v", a.Name, e) +} + +// HandleAction uses ✧✧✧reflection✧✧✧ to figure out if the given Action is an +// ActionFunc, a func with the legacy signature for Action, or some other +// invalid thing. If it's an ActionFunc or a func with the legacy signature for +// Action, the func is run! +func HandleAction(action interface{}, context *Context) (err error) { + defer func() { + if r := recover(); r != nil { + switch r.(type) { + case error: + err = r.(error) + default: + err = NewExitError(fmt.Sprintf("ERROR unknown Action error: %v. 
See %s", r, appActionDeprecationURL), 2) + } + } + }() + + if reflect.TypeOf(action).Kind() != reflect.Func { + return errNonFuncAction + } + + vals := reflect.ValueOf(action).Call([]reflect.Value{reflect.ValueOf(context)}) + + if len(vals) == 0 { + return nil + } + + if len(vals) > 1 { + return errInvalidActionSignature + } + + if retErr, ok := vals[0].Interface().(error); vals[0].IsValid() && ok { + return retErr + } + + return err +} diff --git a/vendor/github.com/codegangsta/cli/app_test.go b/vendor/github.com/codegangsta/cli/app_test.go deleted file mode 100644 index d4216fee..00000000 --- a/vendor/github.com/codegangsta/cli/app_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package cli_test - -import ( - "fmt" - "os" - "testing" - - "github.com/codegangsta/cli" -) - -func ExampleApp() { - // set args for examples sake - os.Args = []string{"greet", "--name", "Jeremy"} - - app := cli.NewApp() - app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Action = func(c *cli.Context) { - fmt.Printf("Hello %v\n", c.String("name")) - } - app.Run(os.Args) - // Output: - // Hello Jeremy -} - -func ExampleAppSubcommand() { - // set args for examples sake - os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} - app := cli.NewApp() - app.Name = "say" - app.Commands = []cli.Command{ - { - Name: "hello", - ShortName: "hi", - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ - { - Name: "english", - ShortName: "en", - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *cli.Context) { - fmt.Println("Hello,", c.String("name")) - }, - }, - }, - }, - } - - app.Run(os.Args) - // Output: - // Hello, Jeremy -} - -func ExampleAppHelp() { - // set args for examples sake - os.Args = []string{"greet", "h", "describeit"} - - app := cli.NewApp() - app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Commands = []cli.Command{ - { - Name: "describeit", - ShortName: "d", - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { - fmt.Printf("i like to describe things") - }, - }, - } - app.Run(os.Args) - // Output: - // NAME: - // describeit - use it to see a description - // - // USAGE: - // command describeit [arguments...] 
- // - // DESCRIPTION: - // This is how we describe describeit the function -} - -func ExampleAppBashComplete() { - // set args for examples sake - os.Args = []string{"greet", "--generate-bash-completion"} - - app := cli.NewApp() - app.Name = "greet" - app.EnableBashCompletion = true - app.Commands = []cli.Command{ - { - Name: "describeit", - ShortName: "d", - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { - fmt.Printf("i like to describe things") - }, - }, { - Name: "next", - Usage: "next example", - Description: "more stuff to see when generating bash completion", - Action: func(c *cli.Context) { - fmt.Printf("the next example") - }, - }, - } - - app.Run(os.Args) - // Output: - // describeit - // d - // next - // help - // h -} - -func TestApp_Run(t *testing.T) { - s := "" - - app := cli.NewApp() - app.Action = func(c *cli.Context) { - s = s + c.Args().First() - } - - err := app.Run([]string{"command", "foo"}) - expect(t, err, nil) - err = app.Run([]string{"command", "bar"}) - expect(t, err, nil) - expect(t, s, "foobar") -} - -var commandAppTests = []struct { - name string - expected bool -}{ - {"foobar", true}, - {"batbaz", true}, - {"b", true}, - {"f", true}, - {"bat", false}, - {"nothing", false}, -} - -func TestApp_Command(t *testing.T) { - app := cli.NewApp() - fooCommand := cli.Command{Name: "foobar", ShortName: "f"} - batCommand := cli.Command{Name: "batbaz", ShortName: "b"} - app.Commands = []cli.Command{ - fooCommand, - batCommand, - } - - for _, test := range commandAppTests { - expect(t, app.Command(test.name) != nil, test.expected) - } -} - -func TestApp_CommandWithArgBeforeFlags(t *testing.T) { - var parsedOption, firstArg string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *cli.Context) { - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) - - expect(t, parsedOption, "my-option") - expect(t, firstArg, "my-arg") -} - -func TestApp_Float64Flag(t *testing.T) { - var meters float64 - - app := cli.NewApp() - app.Flags = []cli.Flag{ - cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, - } - app.Action = func(c *cli.Context) { - meters = c.Float64("height") - } - - app.Run([]string{"", "--height", "1.93"}) - expect(t, meters, 1.93) -} - -func TestApp_ParseSliceFlags(t *testing.T) { - var parsedOption, firstArg string - var parsedIntSlice []int - var parsedStringSlice []string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"}, - cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"}, - }, - Action: func(c *cli.Context) { - parsedIntSlice = c.IntSlice("p") - parsedStringSlice = c.StringSlice("ip") - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"}) - - IntsEquals := func(a, b []int) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - - StrsEquals := func(a, b []string) bool { - if len(a) != len(b) { - 
return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - var expectedIntSlice = []int{22, 80} - var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"} - - if !IntsEquals(parsedIntSlice, expectedIntSlice) { - t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice) - } - - if !StrsEquals(parsedStringSlice, expectedStringSlice) { - t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice) - } -} - -func TestApp_BeforeFunc(t *testing.T) { - beforeRun, subcommandRun := false, false - beforeError := fmt.Errorf("fail") - var err error - - app := cli.NewApp() - - app.Before = func(c *cli.Context) error { - beforeRun = true - s := c.String("opt") - if s == "fail" { - return beforeError - } - - return nil - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "sub", - Action: func(c *cli.Context) { - subcommandRun = true - }, - }, - } - - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "opt"}, - } - - // run with the Before() func succeeding - err = app.Run([]string{"command", "--opt", "succeed", "sub"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } - - // reset - beforeRun, subcommandRun = false, false - - // run with the Before() func failing - err = app.Run([]string{"command", "--opt", "fail", "sub"}) - - // should be the same error produced by the Before func - if err != beforeError { - t.Errorf("Run error expected, but not received") - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == true { - t.Errorf("Subcommand executed when NOT expected") - } - -} - -func TestAppHelpPrinter(t *testing.T) { - oldPrinter := cli.HelpPrinter - defer func() { - cli.HelpPrinter = oldPrinter - }() - - var wasCalled = false - cli.HelpPrinter = func(template string, data interface{}) { - wasCalled = true - } - - app := cli.NewApp() - app.Run([]string{"-h"}) - - if wasCalled == false { - t.Errorf("Help printer expected to be called, but was not") - } -} - -func TestAppCommandNotFound(t *testing.T) { - beforeRun, subcommandRun := false, false - app := cli.NewApp() - - app.CommandNotFound = func(c *cli.Context, command string) { - beforeRun = true - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "bar", - Action: func(c *cli.Context) { - subcommandRun = true - }, - }, - } - - app.Run([]string{"command", "foo"}) - - expect(t, beforeRun, true) - expect(t, subcommandRun, false) -} - -func TestGlobalFlagsInSubcommands(t *testing.T) { - subcommandRun := false - app := cli.NewApp() - - app.Flags = []cli.Flag{ - cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "foo", - Subcommands: []cli.Command{ - { - Name: "bar", - Action: func(c *cli.Context) { - if c.GlobalBool("debug") { - subcommandRun = true - } - }, - }, - }, - }, - } - - app.Run([]string{"command", "-d", "foo", "bar"}) - - expect(t, subcommandRun, true) -} diff --git a/vendor/github.com/codegangsta/cli/appveyor.yml b/vendor/github.com/codegangsta/cli/appveyor.yml new file mode 100644 index 00000000..173086e5 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/appveyor.yml @@ -0,0 +1,25 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\urfave\cli + +environment: + GOPATH: C:\gopath + GOVERSION: 1.6 + PYTHON: C:\Python27-x64 + 
PYTHON_VERSION: 2.7.x + PYTHON_ARCH: 64 + GFMXR_DEBUG: 1 + +install: +- set PATH=%GOPATH%\bin;C:\go\bin;%PATH% +- go version +- go env +- go get github.com/urfave/gfmxr/... +- go get -v -t ./... + +build_script: +- python runtests vet +- python runtests test +- python runtests gfmxr diff --git a/vendor/github.com/codegangsta/cli/category.go b/vendor/github.com/codegangsta/cli/category.go new file mode 100644 index 00000000..1a605502 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/category.go @@ -0,0 +1,44 @@ +package cli + +// CommandCategories is a slice of *CommandCategory. +type CommandCategories []*CommandCategory + +// CommandCategory is a category containing commands. +type CommandCategory struct { + Name string + Commands Commands +} + +func (c CommandCategories) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c CommandCategories) Len() int { + return len(c) +} + +func (c CommandCategories) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// AddCommand adds a command to a category. +func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { + for _, commandCategory := range c { + if commandCategory.Name == category { + commandCategory.Commands = append(commandCategory.Commands, command) + return c + } + } + return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/codegangsta/cli/cli.go b/vendor/github.com/codegangsta/cli/cli.go index b7425458..f0440c56 100644 --- a/vendor/github.com/codegangsta/cli/cli.go +++ b/vendor/github.com/codegangsta/cli/cli.go @@ -10,7 +10,7 @@ // app := cli.NewApp() // app.Name = "greet" // app.Usage = "say a greeting" -// app.Action = func(c *cli.Context) { +// app.Action = func(c *cli.Context) error { // println("Greetings") // } // diff --git a/vendor/github.com/codegangsta/cli/cli_test.go b/vendor/github.com/codegangsta/cli/cli_test.go deleted file mode 100644 index 879a793d..00000000 --- a/vendor/github.com/codegangsta/cli/cli_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package cli_test - -import ( - "os" - - "github.com/codegangsta/cli" -) - -func Example() { - app := cli.NewApp() - app.Name = "todo" - app.Usage = "task list on the command line" - app.Commands = []cli.Command{ - { - Name: "add", - ShortName: "a", - Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - ShortName: "c", - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - }, - } - - app.Run(os.Args) -} - -func ExampleSubcommand() { - app := cli.NewApp() - app.Name = "say" - app.Commands = []cli.Command{ - { - Name: "hello", - ShortName: "hi", - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ - { - Name: "english", - ShortName: "en", - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hello, ", c.String("name")) - }, - }, { - Name: "spanish", - ShortName: "sp", - Usage: "sends a 
greeting in spanish", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "surname", - Value: "Jones", - Usage: "Surname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hola, ", c.String("surname")) - }, - }, { - Name: "french", - ShortName: "fr", - Usage: "sends a greeting in french", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "nickname", - Value: "Stevie", - Usage: "Nickname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Bonjour, ", c.String("nickname")) - }, - }, - }, - }, { - Name: "bye", - Usage: "says goodbye", - Action: func(c *cli.Context) { - println("bye") - }, - }, - } - - app.Run(os.Args) -} diff --git a/vendor/github.com/codegangsta/cli/command.go b/vendor/github.com/codegangsta/cli/command.go index dcc8de5c..8950ccae 100644 --- a/vendor/github.com/codegangsta/cli/command.go +++ b/vendor/github.com/codegangsta/cli/command.go @@ -3,6 +3,7 @@ package cli import ( "fmt" "io/ioutil" + "sort" "strings" ) @@ -10,37 +11,70 @@ import ( type Command struct { // The name of the command Name string - // short name of the command. Typically one character + // short name of the command. Typically one character (deprecated, use `Aliases`) ShortName string + // A list of aliases for the command + Aliases []string // A short description of the usage of this command Usage string + // Custom text to show on USAGE section of help + UsageText string // A longer explanation of how the command works Description string + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string // The function to call when checking for bash command completions - BashComplete func(context *Context) + BashComplete BashCompleteFunc // An action to execute before any sub-subcommands are run, but after the context is ready // If a non-nil error is returned, no sub-subcommands are run - Before func(context *Context) error + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc // The function to call when this command is invoked - Action func(context *Context) + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc // List of child commands - Subcommands []Command + Subcommands Commands // List of flags to parse Flags []Flag // Treat all flags as normal arguments if true SkipFlagParsing bool // Boolean to hide built-in help command HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string } -// Invokes the command given the context, parses ctx.Args() to generate command-specific flags -func (c Command) Run(ctx *Context) error { +// FullName returns the full name of the command. 
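As a quick illustration of the Command fields introduced above (Aliases, Category, Hidden), a hypothetical command definition might look like this; the command name and category are invented for the example:

package main

import "github.com/codegangsta/cli"

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{{
		Name:     "remove",
		Aliases:  []string{"rm", "del"}, // any of these match through Names()/HasName()
		Category: "maintenance",         // commands sharing a Category are grouped in help
		Hidden:   false,                 // Hidden=true would drop it from VisibleCommands()
		Usage:    "remove an item",
		Action: func(c *cli.Context) error {
			return nil
		},
	}}
	app.Run([]string{"app", "rm"})
}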
+// For subcommands this ensures that parent commands are part of the command path +func (c Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} - if len(c.Subcommands) > 0 || c.Before != nil { +// Commands is a slice of Command +type Commands []Command + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c Command) Run(ctx *Context) (err error) { + if len(c.Subcommands) > 0 { return c.startApp(ctx) } - if !c.HideHelp { + if !c.HideHelp && (HelpFlag != BoolFlag{}) { // append help to flags c.Flags = append( c.Flags, @@ -55,40 +89,65 @@ func (c Command) Run(ctx *Context) error { set := flagSet(c.Name, c.Flags) set.SetOutput(ioutil.Discard) - firstFlagIndex := -1 - for index, arg := range ctx.Args() { - if strings.HasPrefix(arg, "-") { - firstFlagIndex = index - break + if !c.SkipFlagParsing { + firstFlagIndex := -1 + terminatorIndex := -1 + for index, arg := range ctx.Args() { + if arg == "--" { + terminatorIndex = index + break + } else if arg == "-" { + // Do nothing. A dash alone is not really a flag. + continue + } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { + firstFlagIndex = index + } + } + + if firstFlagIndex > -1 { + args := ctx.Args() + regularArgs := make([]string, len(args[1:firstFlagIndex])) + copy(regularArgs, args[1:firstFlagIndex]) + + var flagArgs []string + if terminatorIndex > -1 { + flagArgs = args[firstFlagIndex:terminatorIndex] + regularArgs = append(regularArgs, args[terminatorIndex:]...) + } else { + flagArgs = args[firstFlagIndex:] + } + + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + } else { + if c.SkipFlagParsing { + err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) } } - var err error - if firstFlagIndex > -1 && !c.SkipFlagParsing { - args := ctx.Args() - regularArgs := args[1:firstFlagIndex] - flagArgs := args[firstFlagIndex:] - err = set.Parse(append(flagArgs, regularArgs...)) - } else { - err = set.Parse(ctx.Args().Tail()) - } - if err != nil { - fmt.Printf("Incorrect Usage.\n\n") + if c.OnUsageError != nil { + err := c.OnUsageError(ctx, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") + fmt.Fprintln(ctx.App.Writer) ShowCommandHelp(ctx, c.Name) - fmt.Println("") return err } nerr := normalizeFlags(c.Flags, set) if nerr != nil { - fmt.Println(nerr) - fmt.Println("") + fmt.Fprintln(ctx.App.Writer, nerr) + fmt.Fprintln(ctx.App.Writer) ShowCommandHelp(ctx, c.Name) - fmt.Println("") return nerr } - context := NewContext(ctx.App, set, ctx.globalSet) + + context := NewContext(ctx.App, set, ctx) if checkCommandCompletions(context, c.Name) { return nil @@ -97,32 +156,101 @@ func (c Command) Run(ctx *Context) error { if checkCommandHelp(context, c.Name) { return nil } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + HandleExitCoder(err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + fmt.Fprintln(ctx.App.Writer, err) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + HandleExitCoder(err) + return err + } + } + context.Command = c - c.Action(context) - return nil + err = HandleAction(c.Action, context) + + if err != nil { + HandleExitCoder(err) + } + return err } -// Returns true if Command.Name or Command.ShortName 
matches given name +// Names returns the names including short names and aliases. +func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) +} + +// HasName returns true if Command.Name or Command.ShortName matches given name func (c Command) HasName(name string) bool { - return c.Name == name || c.ShortName == name + for _, n := range c.Names() { + if n == name { + return true + } + } + return false } func (c Command) startApp(ctx *Context) error { app := NewApp() - + app.Metadata = ctx.App.Metadata // set the name and usage app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + if c.HelpName == "" { + app.HelpName = c.HelpName + } else { + app.HelpName = app.Name + } + if c.Description != "" { app.Usage = c.Description } else { app.Usage = c.Usage } + // set CommandNotFound + app.CommandNotFound = ctx.App.CommandNotFound + // set the flags and commands app.Commands = c.Subcommands app.Flags = c.Flags app.HideHelp = c.HideHelp + app.Version = ctx.App.Version + app.HideVersion = ctx.App.HideVersion + app.Compiled = ctx.App.Compiled + app.Author = ctx.App.Author + app.Email = ctx.App.Email + app.Writer = ctx.App.Writer + + app.categories = CommandCategories{} + for _, command := range c.Subcommands { + app.categories = app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories) + // bash completion app.EnableBashCompletion = ctx.App.EnableBashCompletion if c.BashComplete != nil { @@ -131,11 +259,21 @@ func (c Command) startApp(ctx *Context) error { // set the actions app.Before = c.Before + app.After = c.After if c.Action != nil { app.Action = c.Action } else { app.Action = helpSubcommand.Action } + for index, cc := range app.Commands { + app.Commands[index].commandNamePath = []string{c.Name, cc.Name} + } + return app.RunAsSubcommand(ctx) } + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} diff --git a/vendor/github.com/codegangsta/cli/command_test.go b/vendor/github.com/codegangsta/cli/command_test.go deleted file mode 100644 index 3afd83e7..00000000 --- a/vendor/github.com/codegangsta/cli/command_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package cli_test - -import ( - "flag" - "github.com/codegangsta/cli" - "testing" -) - -func TestCommandDoNotIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah", "-break"} - set.Parse(test) - - c := cli.NewContext(app, set, set) - - command := cli.Command { - Name: "test-cmd", - ShortName: "tc", - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) { }, - } - err := command.Run(c) - - expect(t, err.Error(), "flag provided but not defined: -break") -} - -func TestCommandIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah"} - set.Parse(test) - - c := cli.NewContext(app, set, set) - - command := cli.Command { - Name: "test-cmd", - ShortName: "tc", - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) { }, - SkipFlagParsing: true, - } - err := command.Run(c) - - expect(t, err, nil) -} diff --git a/vendor/github.com/codegangsta/cli/context.go b/vendor/github.com/codegangsta/cli/context.go index 1e023cef..c3424636 100644 --- a/vendor/github.com/codegangsta/cli/context.go +++ b/vendor/github.com/codegangsta/cli/context.go @@ -5,6 +5,7 
@@ import ( "flag" "strconv" "strings" + "time" ) // Context is a type that is passed through to @@ -12,89 +13,165 @@ import ( // can be used to retrieve context-specific Args and // parsed command-line options. type Context struct { - App *App - Command Command - flagSet *flag.FlagSet - globalSet *flag.FlagSet - setFlags map[string]bool + App *App + Command Command + flagSet *flag.FlagSet + setFlags map[string]bool + globalSetFlags map[string]bool + parentContext *Context } -// Creates a new context. For use in when invoking an App or Command action. -func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context { - return &Context{App: app, flagSet: set, globalSet: globalSet} +// NewContext creates a new context. For use in when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + return &Context{App: app, flagSet: set, parentContext: parentCtx} } -// Looks up the value of a local int flag, returns 0 if no int flag exists +// Int looks up the value of a local int flag, returns 0 if no int flag exists func (c *Context) Int(name string) int { return lookupInt(name, c.flagSet) } -// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists +// Duration looks up the value of a local time.Duration flag, returns 0 if no +// time.Duration flag exists +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// Float64 looks up the value of a local float64 flag, returns 0 if no float64 +// flag exists func (c *Context) Float64(name string) float64 { return lookupFloat64(name, c.flagSet) } -// Looks up the value of a local bool flag, returns false if no bool flag exists +// Bool looks up the value of a local bool flag, returns false if no bool flag exists func (c *Context) Bool(name string) bool { return lookupBool(name, c.flagSet) } -// Looks up the value of a local boolT flag, returns false if no bool flag exists +// BoolT looks up the value of a local boolT flag, returns false if no bool flag exists func (c *Context) BoolT(name string) bool { return lookupBoolT(name, c.flagSet) } -// Looks up the value of a local string flag, returns "" if no string flag exists +// String looks up the value of a local string flag, returns "" if no string flag exists func (c *Context) String(name string) string { return lookupString(name, c.flagSet) } -// Looks up the value of a local string slice flag, returns nil if no string slice flag exists +// StringSlice looks up the value of a local string slice flag, returns nil if no +// string slice flag exists func (c *Context) StringSlice(name string) []string { return lookupStringSlice(name, c.flagSet) } -// Looks up the value of a local int slice flag, returns nil if no int slice flag exists +// IntSlice looks up the value of a local int slice flag, returns nil if no int +// slice flag exists func (c *Context) IntSlice(name string) []int { return lookupIntSlice(name, c.flagSet) } -// Looks up the value of a local generic flag, returns nil if no generic flag exists +// Generic looks up the value of a local generic flag, returns nil if no generic +// flag exists func (c *Context) Generic(name string) interface{} { return lookupGeneric(name, c.flagSet) } -// Looks up the value of a global int flag, returns 0 if no int flag exists +// GlobalInt looks up the value of a global int flag, returns 0 if no int flag exists func (c *Context) GlobalInt(name string) int { - return lookupInt(name, c.globalSet) + if fs := 
lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 } -// Looks up the value of a global bool flag, returns false if no bool flag exists +// GlobalFloat64 looks up the value of a global float64 flag, returns float64(0) +// if no float64 flag exists +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return float64(0) +} + +// GlobalDuration looks up the value of a global time.Duration flag, returns 0 +// if no time.Duration flag exists +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +// GlobalBool looks up the value of a global bool flag, returns false if no bool +// flag exists func (c *Context) GlobalBool(name string) bool { - return lookupBool(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false } -// Looks up the value of a global string flag, returns "" if no string flag exists +// GlobalBoolT looks up the value of a global bool flag, returns true if no bool +// flag exists +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +// GlobalString looks up the value of a global string flag, returns "" if no +// string flag exists func (c *Context) GlobalString(name string) string { - return lookupString(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" } -// Looks up the value of a global string slice flag, returns nil if no string slice flag exists +// GlobalStringSlice looks up the value of a global string slice flag, returns +// nil if no string slice flag exists func (c *Context) GlobalStringSlice(name string) []string { - return lookupStringSlice(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil } -// Looks up the value of a global int slice flag, returns nil if no int slice flag exists +// GlobalIntSlice looks up the value of a global int slice flag, returns nil if +// no int slice flag exists func (c *Context) GlobalIntSlice(name string) []int { - return lookupIntSlice(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil } -// Looks up the value of a global generic flag, returns nil if no generic flag exists +// GlobalGeneric looks up the value of a global generic flag, returns nil if no +// generic flag exists func (c *Context) GlobalGeneric(name string) interface{} { - return lookupGeneric(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil } -// Determines if the flag was actually set exists +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Set sets a context flag to a value. 
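The Global* lookups above now walk the parentContext chain instead of consulting a single stored global flag set, so nested subcommands resolve top-level flags without extra plumbing. A small sketch (flag name illustrative):

package main

import (
	"fmt"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{cli.BoolFlag{Name: "debug"}}
	app.Commands = []cli.Command{{
		Name: "sub",
		Action: func(c *cli.Context) error {
			// GlobalBool searches enclosing contexts until one defines "debug".
			fmt.Println("debug:", c.GlobalBool("debug"))
			// GlobalSet writes through to the outermost (app-level) flag set.
			return c.GlobalSet("debug", "false")
		},
	}}
	app.Run([]string{"app", "--debug", "sub"})
}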
+func (c *Context) Set(name, value string) error { + return c.flagSet.Set(name, value) +} + +// GlobalSet sets a context flag to a value on the global flagset +func (c *Context) GlobalSet(name, value string) error { + return globalContext(c).flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set func (c *Context) IsSet(name string) bool { if c.setFlags == nil { c.setFlags = make(map[string]bool) @@ -105,15 +182,67 @@ func (c *Context) IsSet(name string) bool { return c.setFlags[name] == true } +// GlobalIsSet determines if the global flag was actually set +func (c *Context) GlobalIsSet(name string) bool { + if c.globalSetFlags == nil { + c.globalSetFlags = make(map[string]bool) + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext { + ctx.flagSet.Visit(func(f *flag.Flag) { + c.globalSetFlags[f.Name] = true + }) + } + } + return c.globalSetFlags[name] +} + +// FlagNames returns a slice of flag names used in this context. +func (c *Context) FlagNames() (names []string) { + for _, flag := range c.Command.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" { + continue + } + names = append(names, name) + } + return +} + +// GlobalFlagNames returns a slice of global flag names used by the app. +func (c *Context) GlobalFlagNames() (names []string) { + for _, flag := range c.App.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" || name == "version" { + continue + } + names = append(names, name) + } + return +} + +// Parent returns the parent context, if any +func (c *Context) Parent() *Context { + return c.parentContext +} + +// Args contains apps console arguments type Args []string -// Returns the command line arguments associated with the context. +// Args returns the command line arguments associated with the context. func (c *Context) Args() Args { args := Args(c.flagSet.Args()) return args } -// Returns the nth argument, or else a blank string +// NArg returns the number of the command line arguments. 
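NArg and the Args accessors make positional-argument handling explicit; a brief sketch of typical use inside an Action:

package main

import (
	"fmt"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Action = func(c *cli.Context) error {
		if c.NArg() < 1 { // number of positional arguments
			return cli.NewExitError("expected at least one argument", 1)
		}
		fmt.Println("first:", c.Args().First()) // "" when absent
		fmt.Println("rest:", c.Args().Tail())   // empty slice when none
		return nil
	}
	app.Run([]string{"app", "alpha", "beta"})
}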
+func (c *Context) NArg() int { + return len(c.Args()) +} + +// Get returns the nth argument, or else a blank string func (a Args) Get(n int) string { if len(a) > n { return a[n] @@ -121,12 +250,12 @@ func (a Args) Get(n int) string { return "" } -// Returns the first argument, or else a blank string +// First returns the first argument, or else a blank string func (a Args) First() string { return a.Get(0) } -// Return the rest of the arguments (not the first one) +// Tail returns the rest of the arguments (not the first one) // or else an empty string slice func (a Args) Tail() []string { if len(a) >= 2 { @@ -135,12 +264,12 @@ func (a Args) Tail() []string { return []string{} } -// Checks if there are any arguments present +// Present checks if there are any arguments present func (a Args) Present() bool { return len(a) != 0 } -// Swaps arguments at the given indexes +// Swap swaps arguments at the given indexes func (a Args) Swap(from, to int) error { if from >= len(a) || to >= len(a) { return errors.New("index out of range") @@ -149,6 +278,31 @@ func (a Args) Swap(from, to int) error { return nil } +func globalContext(ctx *Context) *Context { + if ctx == nil { + return nil + } + + for { + if ctx.parentContext == nil { + return ctx + } + ctx = ctx.parentContext + } +} + +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + func lookupInt(name string, set *flag.FlagSet) int { f := set.Lookup(name) if f != nil { @@ -162,6 +316,18 @@ func lookupInt(name string, set *flag.FlagSet) int { return 0 } +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + val, err := time.ParseDuration(f.Value.String()) + if err == nil { + return val + } + } + + return 0 +} + func lookupFloat64(name string, set *flag.FlagSet) float64 { f := set.Lookup(name) if f != nil { @@ -252,7 +418,7 @@ func normalizeFlags(flags []Flag, set *flag.FlagSet) error { visited[f.Name] = true }) for _, f := range flags { - parts := strings.Split(f.getName(), ",") + parts := strings.Split(f.GetName(), ",") if len(parts) == 1 { continue } diff --git a/vendor/github.com/codegangsta/cli/context_test.go b/vendor/github.com/codegangsta/cli/context_test.go deleted file mode 100644 index 89041b99..00000000 --- a/vendor/github.com/codegangsta/cli/context_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package cli_test - -import ( - "flag" - "github.com/codegangsta/cli" - "testing" -) - -func TestNewContext(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Int("myflag", 42, "doc") - command := cli.Command{Name: "mycommand"} - c := cli.NewContext(nil, set, globalSet) - c.Command = command - expect(t, c.Int("myflag"), 12) - expect(t, c.GlobalInt("myflag"), 42) - expect(t, c.Command.Name, "mycommand") -} - -func TestContext_Int(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Int("myflag"), 12) -} - -func TestContext_String(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.String("myflag", "hello world", "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.String("myflag"), "hello world") -} - -func TestContext_Bool(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - 
c := cli.NewContext(nil, set, set) - expect(t, c.Bool("myflag"), false) -} - -func TestContext_BoolT(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", true, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.BoolT("myflag"), true) -} - -func TestContext_Args(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) - set.Parse([]string{"--myflag", "bat", "baz"}) - expect(t, len(c.Args()), 2) - expect(t, c.Bool("myflag"), true) -} - -func TestContext_IsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - c := cli.NewContext(nil, set, set) - set.Parse([]string{"--myflag", "bat", "baz"}) - expect(t, c.IsSet("myflag"), true) - expect(t, c.IsSet("otherflag"), false) - expect(t, c.IsSet("bogusflag"), false) -} diff --git a/vendor/github.com/codegangsta/cli/errors.go b/vendor/github.com/codegangsta/cli/errors.go new file mode 100644 index 00000000..ea551be1 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/errors.go @@ -0,0 +1,92 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError struct { + Errors []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +// Error implents the error interface. +func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +// ExitError fulfills both the builtin `error` interface and `ExitCoder` +type ExitError struct { + exitCode int + message string +} + +// NewExitError makes a new *ExitError +func NewExitError(message string, exitCode int) *ExitError { + return &ExitError{ + exitCode: exitCode, + message: message, + } +} + +// Error returns the string message, fulfilling the interface required by +// `error` +func (ee *ExitError) Error() string { + return ee.message +} + +// ExitCode returns the exit code, fulfilling the interface required by +// `ExitCoder` +func (ee *ExitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to stderr (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, then this func is +// called on all members of the Errors slice. 
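In practice an Action (or a Before/After hook) signals a specific process exit status by returning an error that satisfies ExitCoder; App.Run feeds whatever comes back through HandleExitCoder. A minimal sketch:

package main

import (
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Action = func(c *cli.Context) error {
		// Run routes this through HandleExitCoder, which prints the message
		// to ErrWriter and then calls OsExiter(3).
		return cli.NewExitError("refusing to continue", 3)
	}
	app.Run(os.Args)
}

When both the Action and an After hook fail, Run wraps the two errors in a MultiError, and HandleExitCoder processes its members one by one.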
+func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + fmt.Fprintln(ErrWriter, err) + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + for _, merr := range multiErr.Errors { + HandleExitCoder(merr) + } + } +} diff --git a/vendor/github.com/codegangsta/cli/flag.go b/vendor/github.com/codegangsta/cli/flag.go index 60353e22..1e8112e7 100644 --- a/vendor/github.com/codegangsta/cli/flag.go +++ b/vendor/github.com/codegangsta/cli/flag.go @@ -4,35 +4,47 @@ import ( "flag" "fmt" "os" + "reflect" + "runtime" "strconv" "strings" + "time" ) -// This flag enables bash-completion for all commands and subcommands +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands var BashCompletionFlag = BoolFlag{ - Name: "generate-bash-completion", + Name: "generate-bash-completion", + Hidden: true, } -// This flag prints the version for the application +// VersionFlag prints the version for the application var VersionFlag = BoolFlag{ Name: "version, v", Usage: "print the version", } -// This flag prints the help for all commands and subcommands +// HelpFlag prints the help for all commands and subcommands +// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand +// unless HideHelp is set to true) var HelpFlag = BoolFlag{ Name: "help, h", Usage: "show help", } +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + // Flag is a common interface related to parsing flags in cli. -// For more advanced flag parsing techniques, it is recomended that +// For more advanced flag parsing techniques, it is recommended that // this interface be implemented. type Flag interface { fmt.Stringer // Apply Flag settings to the given flag set Apply(*flag.FlagSet) - getName() string + GetName() string } func flagSet(name string, flags []Flag) *flag.FlagSet { @@ -64,17 +76,27 @@ type GenericFlag struct { Value Generic Usage string EnvVar string + Hidden bool } +// String returns the string representation of the generic flag to display the +// help text to the user (uses the String() method of the generic flag to show +// the value) func (f GenericFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s %v\t`%v` %s", prefixFor(f.Name), f.Name, f.Value, "-"+f.Name+" option -"+f.Name+" option", f.Usage)) + return FlagStringer(f) } +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag func (f GenericFlag) Apply(set *flag.FlagSet) { val := f.Value if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - val.Set(envVal) + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + val.Set(envVal) + break + } } } @@ -83,271 +105,420 @@ func (f GenericFlag) Apply(set *flag.FlagSet) { }) } -func (f GenericFlag) getName() string { +// GetName returns the name of a flag. 
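Two behaviors from this flag.go hunk are worth a concrete example: EnvVar may now name several variables (checked left to right), and Hidden flags are filtered out by visibleFlags, so they never appear in generated help. Flag and variable names below are illustrative:

package main

import "github.com/codegangsta/cli"

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "token",
			Usage:  "API token",
			EnvVar: "MYAPP_TOKEN,TOKEN", // first variable with a non-empty value wins
		},
		cli.BoolFlag{
			Name:   "experimental",
			Hidden: true, // excluded from VisibleFlags(), hence from help listings
		},
	}
	app.Run([]string{"app"})
}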
+func (f GenericFlag) GetName() string { return f.Name } +// StringSlice is an opaque type for []string to satisfy flag.Value type StringSlice []string +// Set appends the string value to the list of values func (f *StringSlice) Set(value string) error { *f = append(*f, value) return nil } +// String returns a readable representation of this value (for usage defaults) func (f *StringSlice) String() string { return fmt.Sprintf("%s", *f) } +// Value returns the slice of strings set by this flag func (f *StringSlice) Value() []string { return *f } +// StringSliceFlag is a string flag that can be specified multiple times on the +// command-line type StringSliceFlag struct { Name string Value *StringSlice Usage string EnvVar string + Hidden bool } +// String returns the usage func (f StringSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) + return FlagStringer(f) } +// Apply populates the flag given the flag set and environment func (f StringSliceFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - newVal.Set(s) + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + newVal := &StringSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + newVal.Set(s) + } + f.Value = newVal + break } - f.Value = newVal } } eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } set.Var(f.Value, name, f.Usage) }) } -func (f StringSliceFlag) getName() string { +// GetName returns the name of a flag. 
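Slice flags gained the same multi-variable lookup plus per-entry whitespace trimming, and Apply now allocates the backing slice when Value is nil. A sketch, assuming a hypothetical MYAPP_DNS variable:

package main

import (
	"fmt"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// Value may stay nil: Apply initializes an empty StringSlice itself.
		// MYAPP_DNS="8.8.8.8, 8.8.4.4" parses to two trimmed entries.
		cli.StringSliceFlag{Name: "dns", Usage: "DNS servers", EnvVar: "MYAPP_DNS"},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println(c.StringSlice("dns"))
		return nil
	}
	app.Run([]string{"app", "--dns", "1.1.1.1"})
}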
+func (f StringSliceFlag) GetName() string { return f.Name } +// IntSlice is an opaque type for []int to satisfy flag.Value type IntSlice []int +// Set parses the value into an integer and appends it to the list of values func (f *IntSlice) Set(value string) error { - tmp, err := strconv.Atoi(value) if err != nil { return err - } else { - *f = append(*f, tmp) } + *f = append(*f, tmp) return nil } +// String returns a readable representation of this value (for usage defaults) func (f *IntSlice) String() string { return fmt.Sprintf("%d", *f) } +// Value returns the slice of ints set by this flag func (f *IntSlice) Value() []int { return *f } +// IntSliceFlag is an int flag that can be specified multiple times on the +// command-line type IntSliceFlag struct { Name string Value *IntSlice Usage string EnvVar string + Hidden bool } +// String returns the usage func (f IntSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) + return FlagStringer(f) } +// Apply populates the flag given the flag set and environment func (f IntSliceFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + err := newVal.Set(s) + if err != nil { + fmt.Fprintf(ErrWriter, err.Error()) + } } + f.Value = newVal + break } - f.Value = newVal } } eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } set.Var(f.Value, name, f.Usage) }) } -func (f IntSliceFlag) getName() string { +// GetName returns the name of the flag. +func (f IntSliceFlag) GetName() string { return f.Name } +// BoolFlag is a switch that defaults to false type BoolFlag struct { - Name string - Usage string - EnvVar string + Name string + Usage string + EnvVar string + Destination *bool + Hidden bool } +// String returns a readable representation of this value (for usage defaults) func (f BoolFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) + return FlagStringer(f) } +// Apply populates the flag given the flag set and environment func (f BoolFlag) Apply(set *flag.FlagSet) { val := false if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValBool, err := strconv.ParseBool(envVal) + if err == nil { + val = envValBool + } + break } } } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } set.Bool(name, val, f.Usage) }) } -func (f BoolFlag) getName() string { +// GetName returns the name of the flag. 
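The new Destination field, shown on BoolFlag here and mirrored on the other scalar flag types below, binds the parsed value straight to a caller-owned variable instead of requiring a Context lookup. A brief sketch:

package main

import (
	"fmt"

	"github.com/codegangsta/cli"
)

var verbose bool

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:        "verbose",
			Usage:       "enable verbose output",
			Destination: &verbose, // Apply uses set.BoolVar to write the parsed value here
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("verbose:", verbose) // read directly, no c.Bool("verbose") needed
		return nil
	}
	app.Run([]string{"app", "--verbose"})
}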
+func (f BoolFlag) GetName() string { return f.Name } +// BoolTFlag this represents a boolean flag that is true by default, but can +// still be set to false by --some-flag=false type BoolTFlag struct { - Name string - Usage string - EnvVar string + Name string + Usage string + EnvVar string + Destination *bool + Hidden bool } +// String returns a readable representation of this value (for usage defaults) func (f BoolTFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) + return FlagStringer(f) } +// Apply populates the flag given the flag set and environment func (f BoolTFlag) Apply(set *flag.FlagSet) { val := true if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValBool, err := strconv.ParseBool(envVal) + if err == nil { + val = envValBool + break + } } } } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } set.Bool(name, val, f.Usage) }) } -func (f BoolTFlag) getName() string { +// GetName returns the name of the flag. +func (f BoolTFlag) GetName() string { return f.Name } +// StringFlag represents a flag that takes as string value type StringFlag struct { - Name string - Value string - Usage string - EnvVar string + Name string + Value string + Usage string + EnvVar string + Destination *string + Hidden bool } +// String returns the usage func (f StringFlag) String() string { - var fmtString string - fmtString = "%s %v\t%v" - - if len(f.Value) > 0 { - fmtString = "%s '%v'\t%v" - } else { - fmtString = "%s %v\t%v" - } - - return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage)) + return FlagStringer(f) } +// Apply populates the flag given the flag set and environment func (f StringFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - f.Value = envVal + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + f.Value = envVal + break + } } } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } set.String(name, f.Value, f.Usage) }) } -func (f StringFlag) getName() string { +// GetName returns the name of the flag. 
+func (f StringFlag) GetName() string { return f.Name } +// IntFlag is a flag that takes an integer +// Errors if the value provided cannot be parsed type IntFlag struct { - Name string - Value int - Usage string - EnvVar string + Name string + Value int + Usage string + EnvVar string + Destination *int + Hidden bool } +// String returns the usage func (f IntFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), f.Value, f.Usage)) + return FlagStringer(f) } +// Apply populates the flag given the flag set and environment func (f IntFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - envValInt, err := strconv.ParseUint(envVal, 10, 64) - if err == nil { - f.Value = int(envValInt) + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err == nil { + f.Value = int(envValInt) + break + } } } } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + return + } set.Int(name, f.Value, f.Usage) }) } -func (f IntFlag) getName() string { +// GetName returns the name of the flag. +func (f IntFlag) GetName() string { return f.Name } -type Float64Flag struct { - Name string - Value float64 - Usage string - EnvVar string +// DurationFlag is a flag that takes a duration specified in Go's duration +// format: https://golang.org/pkg/time/#ParseDuration +type DurationFlag struct { + Name string + Value time.Duration + Usage string + EnvVar string + Destination *time.Duration + Hidden bool } -func (f Float64Flag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), f.Value, f.Usage)) +// String returns a readable representation of this value (for usage defaults) +func (f DurationFlag) String() string { + return FlagStringer(f) } -func (f Float64Flag) Apply(set *flag.FlagSet) { +// Apply populates the flag given the flag set and environment +func (f DurationFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - if envVal := os.Getenv(f.EnvVar); envVal != "" { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err == nil { - f.Value = float64(envValFloat) + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValDuration, err := time.ParseDuration(envVal) + if err == nil { + f.Value = envValDuration + break + } } } } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Duration(name, f.Value, f.Usage) + }) +} + +// GetName returns the name of the flag. 
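DurationFlag accepts anything time.ParseDuration understands and pairs with the new Context.Duration accessor added earlier in this diff; a minimal sketch:

package main

import (
	"fmt"
	"time"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.DurationFlag{
			Name:  "timeout",
			Value: 30 * time.Second, // default; overridable with e.g. --timeout 1m30s
			Usage: "request timeout",
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("timeout:", c.Duration("timeout"))
		return nil
	}
	app.Run([]string{"app", "--timeout", "1m30s"})
}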
+func (f DurationFlag) GetName() string {
+	return f.Name
+}
+
+// Float64Flag is a flag that takes a float value
+// Errors if the value provided cannot be parsed
+type Float64Flag struct {
+	Name        string
+	Value       float64
+	Usage       string
+	EnvVar      string
+	Destination *float64
+	Hidden      bool
+}
+
+// String returns the usage
+func (f Float64Flag) String() string {
+	return FlagStringer(f)
+}
+
+// Apply populates the flag given the flag set and environment
+func (f Float64Flag) Apply(set *flag.FlagSet) {
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				envValFloat, err := strconv.ParseFloat(envVal, 64)
+				if err == nil {
+					f.Value = float64(envValFloat)
+					break
+				}
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Destination != nil {
+			set.Float64Var(f.Destination, name, f.Value, f.Usage)
+			return
+		}
 		set.Float64(name, f.Value, f.Usage)
 	})
 }
 
-func (f Float64Flag) getName() string {
+// GetName returns the name of the flag.
+func (f Float64Flag) GetName() string {
 	return f.Name
 }
 
+func visibleFlags(fl []Flag) []Flag {
+	visible := []Flag{}
+	for _, flag := range fl {
+		if !reflect.ValueOf(flag).FieldByName("Hidden").Bool() {
+			visible = append(visible, flag)
+		}
+	}
+	return visible
+}
+
 func prefixFor(name string) (prefix string) {
 	if len(name) == 1 {
 		prefix = "-"
@@ -358,22 +529,131 @@ func prefixFor(name string) (prefix string) {
 	return
 }
 
-func prefixedNames(fullName string) (prefixed string) {
+// Returns the placeholder, if any, and the unquoted usage string.
+func unquoteUsage(usage string) (string, string) {
+	for i := 0; i < len(usage); i++ {
+		if usage[i] == '`' {
+			for j := i + 1; j < len(usage); j++ {
+				if usage[j] == '`' {
+					name := usage[i+1 : j]
+					usage = usage[:i] + name + usage[j+1:]
+					return name, usage
+				}
+			}
+			break
+		}
+	}
+	return "", usage
+}
+
+func prefixedNames(fullName, placeholder string) string {
+	var prefixed string
 	parts := strings.Split(fullName, ",")
 	for i, name := range parts {
 		name = strings.Trim(name, " ")
 		prefixed += prefixFor(name) + name
+		if placeholder != "" {
+			prefixed += " " + placeholder
+		}
 		if i < len(parts)-1 {
 			prefixed += ", "
 		}
 	}
-	return
+	return prefixed
}
 
 func withEnvHint(envVar, str string) string {
 	envText := ""
 	if envVar != "" {
-		envText = fmt.Sprintf(" [$%s]", envVar)
+		prefix := "$"
+		suffix := ""
+		sep := ", $"
+		if runtime.GOOS == "windows" {
+			prefix = "%"
+			suffix = "%"
+			sep = "%, %"
+		}
+		envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix)
 	}
 	return str + envText
 }
+
+func stringifyFlag(f Flag) string {
+	fv := reflect.ValueOf(f)
+
+	switch f.(type) {
+	case IntSliceFlag:
+		return withEnvHint(fv.FieldByName("EnvVar").String(),
+			stringifyIntSliceFlag(f.(IntSliceFlag)))
+	case StringSliceFlag:
+		return withEnvHint(fv.FieldByName("EnvVar").String(),
+			stringifyStringSliceFlag(f.(StringSliceFlag)))
+	}
+
+	placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String())
+
+	needsPlaceholder := false
+	defaultValueString := ""
+	val := fv.FieldByName("Value")
+
+	if val.IsValid() {
+		needsPlaceholder = true
+		defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface())
+
+		if val.Kind() == reflect.String && val.String() != "" {
+			defaultValueString = fmt.Sprintf(" (default: %q)", val.String())
+		}
+	}
+
+	if defaultValueString == " (default: )" {
+		defaultValueString = ""
+	}
+
+	if needsPlaceholder && placeholder == "" {
+		placeholder = defaultPlaceholder
+	}
+
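+	// Join the usage text and the rendered default; TrimSpace drops the
+	// trailing space when there is no default to display.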
+ usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + + return withEnvHint(fv.FieldByName("EnvVar").String(), + fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault) +} diff --git a/vendor/github.com/codegangsta/cli/flag_test.go b/vendor/github.com/codegangsta/cli/flag_test.go deleted file mode 100644 index 41032361..00000000 --- a/vendor/github.com/codegangsta/cli/flag_test.go +++ /dev/null @@ -1,554 +0,0 @@ -package cli_test - -import ( - "github.com/codegangsta/cli" - - "fmt" - "os" - "reflect" - "strings" - "testing" -) - -var boolFlagTests = []struct { - name string - expected string -}{ - {"help", "--help\t"}, - {"h", "-h\t"}, -} - -func TestBoolFlagHelpOutput(t *testing.T) { - - for _, test := range boolFlagTests { - flag := cli.BoolFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -var stringFlagTests = []struct { - name string - value string - expected string -}{ - {"help", "", "--help \t"}, - {"h", "", "-h \t"}, - {"h", "", "-h \t"}, - {"test", "Something", "--test 'Something'\t"}, -} - -func TestStringFlagHelpOutput(t *testing.T) { - - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { - - os.Setenv("APP_FOO", "derp") - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_FOO]") { - t.Errorf("%s does not end with [$APP_FOO]", output) - } - } -} - -var stringSliceFlagTests = []struct { - name string - value *cli.StringSlice - expected string -}{ - {"help", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "--help '--help option --help option'\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h '-h option -h option'\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h '-h option -h option'\t"}, - {"test", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("Something") - return s - }(), "--test '--test option 
--test option'\t"}, -} - -func TestStringSliceFlagHelpOutput(t *testing.T) { - - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { - - os.Setenv("APP_QWWX", "11,4") - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_QWWX]") { - t.Errorf("%q does not end with [$APP_QWWX]", output) - } - } -} - -var intFlagTests = []struct { - name string - expected string -}{ - {"help", "--help '0'\t"}, - {"h", "-h '0'\t"}, -} - -func TestIntFlagHelpOutput(t *testing.T) { - - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { - - os.Setenv("APP_BAR", "2") - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var intSliceFlagTests = []struct { - name string - value *cli.IntSlice - expected string -}{ - {"help", &cli.IntSlice{}, "--help '--help option --help option'\t"}, - {"h", &cli.IntSlice{}, "-h '-h option -h option'\t"}, - {"h", &cli.IntSlice{}, "-h '-h option -h option'\t"}, - {"test", func() *cli.IntSlice { - i := &cli.IntSlice{} - i.Set("9") - return i - }(), "--test '--test option --test option'\t"}, -} - -func TestIntSliceFlagHelpOutput(t *testing.T) { - - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { - - os.Setenv("APP_SMURF", "42,3") - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_SMURF]") { - t.Errorf("%q does not end with [$APP_SMURF]", output) - } - } -} - -var float64FlagTests = []struct { - name string - expected string -}{ - {"help", "--help '0'\t"}, - {"h", "-h '0'\t"}, -} - -func TestFloat64FlagHelpOutput(t *testing.T) { - - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { - - os.Setenv("APP_BAZ", "99.4") - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAZ]") { - t.Errorf("%s does not end with [$APP_BAZ]", output) - } - } -} - -var genericFlagTests = []struct { - name string - value cli.Generic - expected string -}{ - {"help", &Parser{}, "--help \t`-help option -help option` "}, - {"h", &Parser{}, "-h \t`-h option -h option` "}, - {"test", &Parser{}, "--test \t`-test option -test option` "}, -} - -func TestGenericFlagHelpOutput(t *testing.T) { - - for _, test := range 
genericFlagTests { - flag := cli.GenericFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { - - os.Setenv("APP_ZAP", "3") - for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_ZAP]") { - t.Errorf("%s does not end with [$APP_ZAP]", output) - } - } -} - -func TestParseMultiString(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("serve") != "10" { - t.Errorf("main name not set") - } - if ctx.String("s") != "10" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiStringFromEnv(t *testing.T) { - os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiStringSliceFromEnv(t *testing.T) { - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiInt(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("serve") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("s") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiIntFromEnv(t *testing.T) { - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "serve, s", Value: &cli.IntSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiIntSliceFromEnv(t *testing.T) { - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ 
- Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiFloat64(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("serve") != 10.2 { - t.Errorf("main name not set") - } - if ctx.Float64("s") != 10.2 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10.2"}) -} - -func TestParseMultiFloat64FromEnv(t *testing.T) { - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBool(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("serve") != true { - t.Errorf("main name not set") - } - if ctx.Bool("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolFromEnv(t *testing.T) { - os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolT(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("serve") != true { - t.Errorf("main name not set") - } - if ctx.BoolT("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolTFromEnv(t *testing.T) { - os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -type Parser [2]string - -func (p *Parser) Set(value string) error { - parts := strings.Split(value, ",") - if len(parts) != 2 { - return fmt.Errorf("invalid format") - } - - (*p)[0] = parts[0] - (*p)[1] = parts[1] - - return nil -} - -func (p *Parser) String() string { - return fmt.Sprintf("%s,%s", p[0], p[1]) -} - -func TestParseGeneric(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10,20"}) -} - -func TestParseGenericFromEnv(t *testing.T) { - os.Setenv("APP_SERVE", "20,30") - a := cli.App{ - Flags: []cli.Flag{ - 
cli.GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} diff --git a/vendor/github.com/codegangsta/cli/funcs.go b/vendor/github.com/codegangsta/cli/funcs.go new file mode 100644 index 00000000..cba5e6cb --- /dev/null +++ b/vendor/github.com/codegangsta/cli/funcs.go @@ -0,0 +1,28 @@ +package cli + +// BashCompleteFunc is an action to execute when the bash-completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string diff --git a/vendor/github.com/codegangsta/cli/help.go b/vendor/github.com/codegangsta/cli/help.go index ccca0362..a9e7327e 100644 --- a/vendor/github.com/codegangsta/cli/help.go +++ b/vendor/github.com/codegangsta/cli/help.go @@ -2,137 +2,175 @@ package cli import ( "fmt" + "io" "os" + "strings" "text/tabwriter" "text/template" ) -// The text template for the Default help topic. +// AppHelpTemplate is the text template for the Default help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var AppHelpTemplate = `NAME: {{.Name}} - {{.Usage}} USAGE: - {{.Name}} {{ if .Flags }}[global options] {{ end }}command{{ if .Flags }} [command options]{{ end }} [arguments...] - + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} + {{if .Version}}{{if not .HideVersion}} VERSION: {{.Version}} - -COMMANDS: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} - {{end}}{{ if .Flags }} + {{end}}{{end}}{{if len .Authors}} +AUTHOR(S): + {{range .Authors}}{{.}}{{end}} + {{end}}{{if .VisibleCommands}} +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{end}}{{if .VisibleFlags}} GLOBAL OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{ end }} + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright}} +COPYRIGHT: + {{.Copyright}} + {{end}} ` -// The text template for the command help topic. 
+// CommandHelpTemplate is the text template for the command help topic.
 // cli.go uses text/template to render templates. You can
 // render custom help text by setting this variable.
 var CommandHelpTemplate = `NAME:
-   {{.Name}} - {{.Usage}}
+   {{.HelpName}} - {{.Usage}}
 
 USAGE:
-   command {{.Name}}{{ if .Flags }} [command options]{{ end }} [arguments...]
+   {{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}}
+
+CATEGORY:
+   {{.Category}}{{end}}{{if .Description}}
 
 DESCRIPTION:
-   {{.Description}}{{ if .Flags }}
+   {{.Description}}{{end}}{{if .VisibleFlags}}
 
 OPTIONS:
-   {{range .Flags}}{{.}}
-   {{end}}{{ end }}
+   {{range .VisibleFlags}}{{.}}
+   {{end}}{{end}}
 `
 
-// The text template for the subcommand help topic.
+// SubcommandHelpTemplate is the text template for the subcommand help topic.
 // cli.go uses text/template to render templates. You can
 // render custom help text by setting this variable.
 var SubcommandHelpTemplate = `NAME:
-   {{.Name}} - {{.Usage}}
+   {{.HelpName}} - {{.Usage}}
 
 USAGE:
-   {{.Name}} command{{ if .Flags }} [command options]{{ end }} [arguments...]
+   {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
 
-COMMANDS:
-   {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
-   {{end}}{{ if .Flags }}
+COMMANDS:{{range .VisibleCategories}}{{if .Name}}
+   {{.Name}}:{{end}}{{range .VisibleCommands}}
+     {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}
+{{end}}{{if .VisibleFlags}}
 OPTIONS:
-   {{range .Flags}}{{.}}
-   {{end}}{{ end }}
+   {{range .VisibleFlags}}{{.}}
+   {{end}}{{end}}
 `
 
 var helpCommand = Command{
 	Name:      "help",
-	ShortName: "h",
+	Aliases:   []string{"h"},
 	Usage:     "Shows a list of commands or help for one command",
-	Action: func(c *Context) {
+	ArgsUsage: "[command]",
+	Action: func(c *Context) error {
 		args := c.Args()
 		if args.Present() {
-			ShowCommandHelp(c, args.First())
-		} else {
-			ShowAppHelp(c)
+			return ShowCommandHelp(c, args.First())
 		}
+
+		ShowAppHelp(c)
+		return nil
 	},
 }
 
 var helpSubcommand = Command{
 	Name:      "help",
-	ShortName: "h",
+	Aliases:   []string{"h"},
 	Usage:     "Shows a list of commands or help for one command",
-	Action: func(c *Context) {
+	ArgsUsage: "[command]",
+	Action: func(c *Context) error {
 		args := c.Args()
 		if args.Present() {
-			ShowCommandHelp(c, args.First())
-		} else {
-			ShowSubcommandHelp(c)
+			return ShowCommandHelp(c, args.First())
 		}
+
+		return ShowSubcommandHelp(c)
 	},
 }
 
-// Prints help for the App
-var HelpPrinter = printHelp
+// Prints help for the App or Command
+type helpPrinter func(w io.Writer, templ string, data interface{})
 
+// HelpPrinter is a function that writes the help output. If not set a default
+// is used. The function signature is:
+// func(w io.Writer, templ string, data interface{})
+var HelpPrinter helpPrinter = printHelp
+
+// VersionPrinter prints the version for the App
+var VersionPrinter = printVersion
+
+// ShowAppHelp is an action that displays the help.
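+// It renders AppHelpTemplate to the App's Writer via HelpPrinter.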
 func ShowAppHelp(c *Context) {
-	HelpPrinter(AppHelpTemplate, c.App)
+	HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
 }
 
-// Prints the list of subcommands as the default app completion method
+// DefaultAppComplete prints the list of subcommands as the default app completion method
 func DefaultAppComplete(c *Context) {
 	for _, command := range c.App.Commands {
-		fmt.Println(command.Name)
-		if command.ShortName != "" {
-			fmt.Println(command.ShortName)
+		if command.Hidden {
+			continue
+		}
+		for _, name := range command.Names() {
+			fmt.Fprintln(c.App.Writer, name)
 		}
 	}
 }
 
-// Prints help for the given command
-func ShowCommandHelp(c *Context, command string) {
-	for _, c := range c.App.Commands {
+// ShowCommandHelp prints help for the given command
+func ShowCommandHelp(ctx *Context, command string) error {
+	// show the subcommand help for a command with subcommands
+	if command == "" {
+		HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
+		return nil
+	}
+
+	for _, c := range ctx.App.Commands {
 		if c.HasName(command) {
-			HelpPrinter(CommandHelpTemplate, c)
-			return
+			HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
+			return nil
 		}
 	}
 
-	if c.App.CommandNotFound != nil {
-		c.App.CommandNotFound(c, command)
-	} else {
-		fmt.Printf("No help topic for '%v'\n", command)
+	if ctx.App.CommandNotFound == nil {
+		return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
 	}
+
+	ctx.App.CommandNotFound(ctx, command)
+	return nil
 }
 
-// Prints help for the given subcommand
-func ShowSubcommandHelp(c *Context) {
-	HelpPrinter(SubcommandHelpTemplate, c.App)
+// ShowSubcommandHelp prints help for the given subcommand
+func ShowSubcommandHelp(c *Context) error {
+	return ShowCommandHelp(c, c.Command.Name)
 }
 
-// Prints the version number of the App
+// ShowVersion prints the version number of the App
 func ShowVersion(c *Context) {
-	fmt.Printf("%v version %v\n", c.App.Name, c.App.Version)
+	VersionPrinter(c)
 }
 
-// Prints the lists of commands within a given context
+func printVersion(c *Context) {
+	fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
+}
+
+// ShowCompletions prints the lists of commands within a given context
 func ShowCompletions(c *Context) {
 	a := c.App
 	if a != nil && a.BashComplete != nil {
@@ -140,7 +178,7 @@ func ShowCompletions(c *Context) {
 	}
 }
 
-// Prints the custom completions for a given command
+// ShowCommandCompletions prints the custom completions for a given command
 func ShowCommandCompletions(ctx *Context, command string) {
 	c := ctx.App.Command(command)
 	if c != nil && c.BashComplete != nil {
@@ -148,32 +186,47 @@ func ShowCommandCompletions(ctx *Context, command string) {
 	}
 }
 
-func printHelp(templ string, data interface{}) {
-	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
-	t := template.Must(template.New("help").Parse(templ))
+func printHelp(out io.Writer, templ string, data interface{}) {
+	funcMap := template.FuncMap{
+		"join": strings.Join,
+	}
+
+	w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0)
+	t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
 	err := t.Execute(w, data)
 	if err != nil {
-		panic(err)
+		// If the writer is closed, t.Execute will fail, and there's nothing
+		// we can do to recover.
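+		// The error is deliberately swallowed here; setting the
+		// CLI_TEMPLATE_ERROR_DEBUG environment variable dumps it to ErrWriter.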
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return } w.Flush() } func checkVersion(c *Context) bool { - if c.GlobalBool("version") { - ShowVersion(c) - return true + found := false + if VersionFlag.Name != "" { + eachName(VersionFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) } - - return false + return found } func checkHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - ShowAppHelp(c) - return true + found := false + if HelpFlag.Name != "" { + eachName(HelpFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) } - - return false + return found } func checkCommandHelp(c *Context, name string) bool { @@ -195,7 +248,7 @@ func checkSubcommandHelp(c *Context) bool { } func checkCompletions(c *Context) bool { - if c.GlobalBool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { + if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { ShowCompletions(c) return true } diff --git a/vendor/github.com/codegangsta/cli/helpers_test.go b/vendor/github.com/codegangsta/cli/helpers_test.go deleted file mode 100644 index cdc4feb2..00000000 --- a/vendor/github.com/codegangsta/cli/helpers_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package cli_test - -import ( - "reflect" - "testing" -) - -/* Test Helpers */ -func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { - t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { - t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} diff --git a/vendor/github.com/codegangsta/cli/runtests b/vendor/github.com/codegangsta/cli/runtests new file mode 100755 index 00000000..72c1f0dd --- /dev/null +++ b/vendor/github.com/codegangsta/cli/runtests @@ -0,0 +1,99 @@ +#!/usr/bin/env python +from __future__ import print_function + +import argparse +import os +import sys +import tempfile + +from subprocess import check_call, check_output + + +PACKAGE_NAME = os.environ.get( + 'CLI_PACKAGE_NAME', 'github.com/urfave/cli' +) + + +def main(sysargs=sys.argv[:]): + targets = { + 'vet': _vet, + 'test': _test, + 'gfmxr': _gfmxr + } + + parser = argparse.ArgumentParser() + parser.add_argument( + 'target', nargs='?', choices=tuple(targets.keys()), default='test' + ) + args = parser.parse_args(sysargs[1:]) + + targets[args.target]() + return 0 + + +def _test(): + if check_output('go version'.split()).split()[2] < 'go1.2': + _run('go test -v .'.split()) + return + + coverprofiles = [] + for subpackage in ['', 'altsrc']: + coverprofile = 'cli.coverprofile' + if subpackage != '': + coverprofile = '{}.coverprofile'.format(subpackage) + + coverprofiles.append(coverprofile) + + _run('go test -v'.split() + [ + '-coverprofile={}'.format(coverprofile), + ('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/') + ]) + + combined_name = _combine_coverprofiles(coverprofiles) + _run('go tool cover -func={}'.format(combined_name).split()) + os.remove(combined_name) + + +def _gfmxr(): + _run(['gfmxr', '-c', str(_gfmxr_count()), '-s', 'README.md']) + + +def _vet(): + _run('go vet ./...'.split()) + + +def _run(command): + print('runtests: {}'.format(' '.join(command)), file=sys.stderr) + check_call(command) + + +def _gfmxr_count(): + with open('README.md') as infile: + lines = 
infile.read().splitlines()
+    return len(filter(_is_go_runnable, lines))
+
+
+def _is_go_runnable(line):
+    return line.startswith('package main')
+
+
+def _combine_coverprofiles(coverprofiles):
+    combined = tempfile.NamedTemporaryFile(
+        suffix='.coverprofile', delete=False
+    )
+    combined.write('mode: set\n')
+
+    for coverprofile in coverprofiles:
+        with open(coverprofile, 'r') as infile:
+            for line in infile.readlines():
+                if not line.startswith('mode: '):
+                    combined.write(line)
+
+    combined.flush()
+    name = combined.name
+    combined.close()
+    return name
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/vendor/github.com/coreos/coreos-cloudinit/.travis.yml b/vendor/github.com/coreos/coreos-cloudinit/.travis.yml
index 07494db3..62ee8cde 100644
--- a/vendor/github.com/coreos/coreos-cloudinit/.travis.yml
+++ b/vendor/github.com/coreos/coreos-cloudinit/.travis.yml
@@ -1,12 +1,9 @@
 language: go
-sudo: false
 matrix:
   include:
-    - go: 1.4
-      install:
-        - go get golang.org/x/tools/cmd/cover
-        - go get golang.org/x/tools/cmd/vet
     - go: 1.5
+      env: GO15VENDOREXPERIMENT=1
+    - go: 1.6
 
 script:
 - ./test
diff --git a/vendor/github.com/coreos/coreos-cloudinit/MAINTAINERS b/vendor/github.com/coreos/coreos-cloudinit/MAINTAINERS
index e358970b..a4d60bbf 100644
--- a/vendor/github.com/coreos/coreos-cloudinit/MAINTAINERS
+++ b/vendor/github.com/coreos/coreos-cloudinit/MAINTAINERS
@@ -1,3 +1,2 @@
 Alex Crawford (@crawford)
 Jonathan Boulle (@jonboulle)
-Brian Waldon (@bcwaldon)
diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/config.go b/vendor/github.com/coreos/coreos-cloudinit/config/config.go
index ede380b4..ce598942 100644
--- a/vendor/github.com/coreos/coreos-cloudinit/config/config.go
+++ b/vendor/github.com/coreos/coreos-cloudinit/config/config.go
@@ -68,6 +68,22 @@ func NewCloudConfig(contents string) (*CloudConfig, error) {
 	return &cfg, err
 }
 
+// Decode decodes the content of a cloud config. Currently only the WriteFiles
+// section supports encodings, and all of them are handled. After decoding,
+// the Encoding field is cleared.
+func (cc *CloudConfig) Decode() error {
+	for i, file := range cc.WriteFiles {
+		content, err := DecodeContent(file.Content, file.Encoding)
+		if err != nil {
+			return err
+		}
+
+		cc.WriteFiles[i].Content = string(content)
+		cc.WriteFiles[i].Encoding = ""
+	}
+
+	return nil
+}
 func (cc CloudConfig) String() string {
 	bytes, err := yaml.Marshal(cc)
 	if err != nil {
diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/config_test.go b/vendor/github.com/coreos/coreos-cloudinit/config/config_test.go
deleted file mode 100644
index efcdd18b..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/config/config_test.go
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- -package config - -import ( - "reflect" - "regexp" - "strings" - "testing" -) - -func TestNewCloudConfig(t *testing.T) { - tests := []struct { - contents string - - config CloudConfig - }{ - {}, - { - contents: "#cloud-config\nwrite_files:\n - path: underscore", - config: CloudConfig{WriteFiles: []File{File{Path: "underscore"}}}, - }, - { - contents: "#cloud-config\nwrite-files:\n - path: hyphen", - config: CloudConfig{WriteFiles: []File{File{Path: "hyphen"}}}, - }, - { - contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: off", - config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "off"}}}, - }, - { - contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: false", - config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "false"}}}, - }, - { - contents: "#cloud-config\nwrite_files:\n - permissions: 0744", - config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}}, - }, - { - contents: "#cloud-config\nwrite_files:\n - permissions: 744", - config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}}, - }, - { - contents: "#cloud-config\nwrite_files:\n - permissions: '0744'", - config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}}, - }, - { - contents: "#cloud-config\nwrite_files:\n - permissions: '744'", - config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}}, - }, - } - - for i, tt := range tests { - config, err := NewCloudConfig(tt.contents) - if err != nil { - t.Errorf("bad error (test case #%d): want %v, got %s", i, nil, err) - } - if !reflect.DeepEqual(&tt.config, config) { - t.Errorf("bad config (test case #%d): want %#v, got %#v", i, tt.config, config) - } - } -} - -func TestIsZero(t *testing.T) { - tests := []struct { - c interface{} - - empty bool - }{ - {struct{}{}, true}, - {struct{ a, b string }{}, true}, - {struct{ A, b string }{}, true}, - {struct{ A, B string }{}, true}, - {struct{ A string }{A: "hello"}, false}, - {struct{ A int }{}, true}, - {struct{ A int }{A: 1}, false}, - } - - for _, tt := range tests { - if empty := IsZero(tt.c); tt.empty != empty { - t.Errorf("bad result (%q): want %t, got %t", tt.c, tt.empty, empty) - } - } -} - -func TestAssertStructValid(t *testing.T) { - tests := []struct { - c interface{} - - err error - }{ - {struct{}{}, nil}, - {struct { - A, b string `valid:"^1|2$"` - }{}, nil}, - {struct { - A, b string `valid:"^1|2$"` - }{A: "1", b: "2"}, nil}, - {struct { - A, b string `valid:"^1|2$"` - }{A: "1", b: "hello"}, nil}, - {struct { - A, b string `valid:"^1|2$"` - }{A: "hello", b: "2"}, &ErrorValid{Value: "hello", Field: "A", Valid: "^1|2$"}}, - {struct { - A, b int `valid:"^1|2$"` - }{}, nil}, - {struct { - A, b int `valid:"^1|2$"` - }{A: 1, b: 2}, nil}, - {struct { - A, b int `valid:"^1|2$"` - }{A: 1, b: 9}, nil}, - {struct { - A, b int `valid:"^1|2$"` - }{A: 9, b: 2}, &ErrorValid{Value: "9", Field: "A", Valid: "^1|2$"}}, - } - - for _, tt := range tests { - if err := AssertStructValid(tt.c); !reflect.DeepEqual(tt.err, err) { - t.Errorf("bad result (%q): want %q, got %q", tt.c, tt.err, err) - } - } -} - -func TestConfigCompile(t *testing.T) { - tests := []interface{}{ - Etcd{}, - File{}, - Flannel{}, - Fleet{}, - Locksmith{}, - OEM{}, - Unit{}, - Update{}, - } - - for _, tt := range tests { - ttt := reflect.TypeOf(tt) - for i := 0; i < ttt.NumField(); i++ { - ft := ttt.Field(i) - if !isFieldExported(ft) { - continue - } - - if _, err := regexp.Compile(ft.Tag.Get("valid")); err != nil { - t.Errorf("bad regexp(%s.%s): want %v, got %s", 
ttt.Name(), ft.Name, nil, err) - } - } - } -} - -func TestCloudConfigUnknownKeys(t *testing.T) { - contents := ` -coreos: - etcd: - discovery: "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" - coreos_unknown: - foo: "bar" -section_unknown: - dunno: - something -bare_unknown: - bar -write_files: - - content: fun - path: /var/party - file_unknown: nofun -users: - - name: fry - passwd: somehash - user_unknown: philip -hostname: - foo -` - cfg, err := NewCloudConfig(contents) - if err != nil { - t.Fatalf("error instantiating CloudConfig with unknown keys: %v", err) - } - if cfg.Hostname != "foo" { - t.Fatalf("hostname not correctly set when invalid keys are present") - } - if cfg.CoreOS.Etcd.Discovery != "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" { - t.Fatalf("etcd section not correctly set when invalid keys are present") - } - if len(cfg.WriteFiles) < 1 || cfg.WriteFiles[0].Content != "fun" || cfg.WriteFiles[0].Path != "/var/party" { - t.Fatalf("write_files section not correctly set when invalid keys are present") - } - if len(cfg.Users) < 1 || cfg.Users[0].Name != "fry" || cfg.Users[0].PasswordHash != "somehash" { - t.Fatalf("users section not correctly set when invalid keys are present") - } -} - -// Assert that the parsing of a cloud config file "generally works" -func TestCloudConfigEmpty(t *testing.T) { - cfg, err := NewCloudConfig("") - if err != nil { - t.Fatalf("Encountered unexpected error :%v", err) - } - - keys := cfg.SSHAuthorizedKeys - if len(keys) != 0 { - t.Error("Parsed incorrect number of SSH keys") - } - - if len(cfg.WriteFiles) != 0 { - t.Error("Expected zero WriteFiles") - } - - if cfg.Hostname != "" { - t.Errorf("Expected hostname to be empty, got '%s'", cfg.Hostname) - } -} - -// Assert that the parsing of a cloud config file "generally works" -func TestCloudConfig(t *testing.T) { - contents := ` -coreos: - etcd: - discovery: "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" - update: - reboot_strategy: reboot - units: - - name: 50-eth0.network - runtime: yes - content: '[Match] - - Name=eth47 - - - [Network] - - Address=10.209.171.177/19 - -' - oem: - id: rackspace - name: Rackspace Cloud Servers - version_id: 168.0.0 - home_url: https://www.rackspace.com/cloud/servers/ - bug_report_url: https://github.com/coreos/coreos-overlay -ssh_authorized_keys: - - foobar - - foobaz -write_files: - - content: | - penny - elroy - path: /etc/dogepack.conf - permissions: '0644' - owner: root:dogepack -hostname: trontastic -` - cfg, err := NewCloudConfig(contents) - if err != nil { - t.Fatalf("Encountered unexpected error :%v", err) - } - - keys := cfg.SSHAuthorizedKeys - if len(keys) != 2 { - t.Error("Parsed incorrect number of SSH keys") - } else if keys[0] != "foobar" { - t.Error("Expected first SSH key to be 'foobar'") - } else if keys[1] != "foobaz" { - t.Error("Expected first SSH key to be 'foobaz'") - } - - if len(cfg.WriteFiles) != 1 { - t.Error("Failed to parse correct number of write_files") - } else { - wf := cfg.WriteFiles[0] - if wf.Content != "penny\nelroy\n" { - t.Errorf("WriteFile has incorrect contents '%s'", wf.Content) - } - if wf.Encoding != "" { - t.Errorf("WriteFile has incorrect encoding %s", wf.Encoding) - } - if wf.RawFilePermissions != "0644" { - t.Errorf("WriteFile has incorrect permissions %s", wf.RawFilePermissions) - } - if wf.Path != "/etc/dogepack.conf" { - t.Errorf("WriteFile has incorrect path %s", wf.Path) - } - if wf.Owner != "root:dogepack" { - t.Errorf("WriteFile has incorrect owner %s", wf.Owner) - } - } - 
- if len(cfg.CoreOS.Units) != 1 { - t.Error("Failed to parse correct number of units") - } else { - u := cfg.CoreOS.Units[0] - expect := `[Match] -Name=eth47 - -[Network] -Address=10.209.171.177/19 -` - if u.Content != expect { - t.Errorf("Unit has incorrect contents '%s'.\nExpected '%s'.", u.Content, expect) - } - if u.Runtime != true { - t.Errorf("Unit has incorrect runtime value") - } - if u.Name != "50-eth0.network" { - t.Errorf("Unit has incorrect name %s", u.Name) - } - } - - if cfg.CoreOS.OEM.ID != "rackspace" { - t.Errorf("Failed parsing coreos.oem. Expected ID 'rackspace', got %q.", cfg.CoreOS.OEM.ID) - } - - if cfg.Hostname != "trontastic" { - t.Errorf("Failed to parse hostname") - } - if cfg.CoreOS.Update.RebootStrategy != "reboot" { - t.Errorf("Failed to parse locksmith strategy") - } -} - -// Assert that our interface conversion doesn't panic -func TestCloudConfigKeysNotList(t *testing.T) { - contents := ` -ssh_authorized_keys: - - foo: bar -` - cfg, err := NewCloudConfig(contents) - if err != nil { - t.Fatalf("Encountered unexpected error: %v", err) - } - - keys := cfg.SSHAuthorizedKeys - if len(keys) != 0 { - t.Error("Parsed incorrect number of SSH keys") - } -} - -func TestCloudConfigSerializationHeader(t *testing.T) { - cfg, _ := NewCloudConfig("") - contents := cfg.String() - header := strings.SplitN(contents, "\n", 2)[0] - if header != "#cloud-config" { - t.Fatalf("Serialized config did not have expected header") - } -} - -func TestCloudConfigUsers(t *testing.T) { - contents := ` -users: - - name: elroy - passwd: somehash - ssh_authorized_keys: - - somekey - gecos: arbitrary comment - homedir: /home/place - no_create_home: yes - primary_group: things - groups: - - ping - - pong - no_user_group: true - system: y - no_log_init: True - shell: /bin/sh -` - cfg, err := NewCloudConfig(contents) - if err != nil { - t.Fatalf("Encountered unexpected error: %v", err) - } - - if len(cfg.Users) != 1 { - t.Fatalf("Parsed %d users, expected 1", len(cfg.Users)) - } - - user := cfg.Users[0] - - if user.Name != "elroy" { - t.Errorf("User name is %q, expected 'elroy'", user.Name) - } - - if user.PasswordHash != "somehash" { - t.Errorf("User passwd is %q, expected 'somehash'", user.PasswordHash) - } - - if keys := user.SSHAuthorizedKeys; len(keys) != 1 { - t.Errorf("Parsed %d ssh keys, expected 1", len(keys)) - } else { - key := user.SSHAuthorizedKeys[0] - if key != "somekey" { - t.Errorf("User SSH key is %q, expected 'somekey'", key) - } - } - - if user.GECOS != "arbitrary comment" { - t.Errorf("Failed to parse gecos field, got %q", user.GECOS) - } - - if user.Homedir != "/home/place" { - t.Errorf("Failed to parse homedir field, got %q", user.Homedir) - } - - if !user.NoCreateHome { - t.Errorf("Failed to parse no_create_home field") - } - - if user.PrimaryGroup != "things" { - t.Errorf("Failed to parse primary_group field, got %q", user.PrimaryGroup) - } - - if len(user.Groups) != 2 { - t.Errorf("Failed to parse 2 goups, got %d", len(user.Groups)) - } else { - if user.Groups[0] != "ping" { - t.Errorf("First group was %q, not expected value 'ping'", user.Groups[0]) - } - if user.Groups[1] != "pong" { - t.Errorf("First group was %q, not expected value 'pong'", user.Groups[1]) - } - } - - if !user.NoUserGroup { - t.Errorf("Failed to parse no_user_group field") - } - - if !user.System { - t.Errorf("Failed to parse system field") - } - - if !user.NoLogInit { - t.Errorf("Failed to parse no_log_init field") - } - - if user.Shell != "/bin/sh" { - t.Errorf("Failed to parse shell field, got %q", 
user.Shell) - } -} - -func TestCloudConfigUsersGithubUser(t *testing.T) { - - contents := ` -users: - - name: elroy - coreos_ssh_import_github: bcwaldon -` - cfg, err := NewCloudConfig(contents) - if err != nil { - t.Fatalf("Encountered unexpected error: %v", err) - } - - if len(cfg.Users) != 1 { - t.Fatalf("Parsed %d users, expected 1", len(cfg.Users)) - } - - user := cfg.Users[0] - - if user.Name != "elroy" { - t.Errorf("User name is %q, expected 'elroy'", user.Name) - } - - if user.SSHImportGithubUser != "bcwaldon" { - t.Errorf("github user is %q, expected 'bcwaldon'", user.SSHImportGithubUser) - } -} - -func TestCloudConfigUsersSSHImportURL(t *testing.T) { - contents := ` -users: - - name: elroy - coreos_ssh_import_url: https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys -` - cfg, err := NewCloudConfig(contents) - if err != nil { - t.Fatalf("Encountered unexpected error: %v", err) - } - - if len(cfg.Users) != 1 { - t.Fatalf("Parsed %d users, expected 1", len(cfg.Users)) - } - - user := cfg.Users[0] - - if user.Name != "elroy" { - t.Errorf("User name is %q, expected 'elroy'", user.Name) - } - - if user.SSHImportURL != "https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys" { - t.Errorf("ssh import url is %q, expected 'https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys'", user.SSHImportURL) - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/etcd2.go b/vendor/github.com/coreos/coreos-cloudinit/config/etcd2.go index 4070800e..d0e0575a 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/config/etcd2.go +++ b/vendor/github.com/coreos/coreos-cloudinit/config/etcd2.go @@ -27,6 +27,7 @@ type Etcd2 struct { DiscoverySRV string `yaml:"discovery_srv" env:"ETCD_DISCOVERY_SRV"` DiscoveryProxy string `yaml:"discovery_proxy" env:"ETCD_DISCOVERY_PROXY"` ElectionTimeout int `yaml:"election_timeout" env:"ETCD_ELECTION_TIMEOUT"` + EnablePprof bool `yaml:"enable_pprof" env:"ETCD_ENABLE_PPROF"` ForceNewCluster bool `yaml:"force_new_cluster" env:"ETCD_FORCE_NEW_CLUSTER"` HeartbeatInterval int `yaml:"heartbeat_interval" env:"ETCD_HEARTBEAT_INTERVAL"` InitialAdvertisePeerURLs string `yaml:"initial_advertise_peer_urls" env:"ETCD_INITIAL_ADVERTISE_PEER_URLS"` @@ -52,6 +53,7 @@ type Etcd2 struct { ProxyRefreshInterval int `yaml:"proxy_refresh_interval" env:"ETCD_PROXY_REFRESH_INTERVAL"` ProxyWriteTimeout int `yaml:"proxy_write_timeout" env:"ETCD_PROXY_WRITE_TIMEOUT"` SnapshotCount int `yaml:"snapshot_count" env:"ETCD_SNAPSHOT_COUNT"` + StrictReconfigCheck bool `yaml:"strict_reconfig_check" env:"ETCD_STRICT_RECONFIG_CHECK"` TrustedCAFile string `yaml:"trusted_ca_file" env:"ETCD_TRUSTED_CA_FILE"` WalDir string `yaml:"wal_dir" env:"ETCD_WAL_DIR"` } diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/file_test.go b/vendor/github.com/coreos/coreos-cloudinit/config/file_test.go deleted file mode 100644 index 60a8efe2..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/config/file_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "testing" -) - -func TestEncodingValid(t *testing.T) { - tests := []struct { - value string - - isValid bool - }{ - {value: "base64", isValid: true}, - {value: "b64", isValid: true}, - {value: "gz", isValid: true}, - {value: "gzip", isValid: true}, - {value: "gz+base64", isValid: true}, - {value: "gzip+base64", isValid: true}, - {value: "gz+b64", isValid: true}, - {value: "gzip+b64", isValid: true}, - {value: "gzzzzbase64", isValid: false}, - {value: "gzipppbase64", isValid: false}, - {value: "unknown", isValid: false}, - } - - for _, tt := range tests { - isValid := (nil == AssertStructValid(File{Encoding: tt.value})) - if tt.isValid != isValid { - t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid) - } - } -} - -func TestRawFilePermissionsValid(t *testing.T) { - tests := []struct { - value string - - isValid bool - }{ - {value: "744", isValid: true}, - {value: "0744", isValid: true}, - {value: "1744", isValid: true}, - {value: "01744", isValid: true}, - {value: "11744", isValid: false}, - {value: "rwxr--r--", isValid: false}, - {value: "800", isValid: false}, - } - - for _, tt := range tests { - isValid := (nil == AssertStructValid(File{RawFilePermissions: tt.value})) - if tt.isValid != isValid { - t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/ignition.go b/vendor/github.com/coreos/coreos-cloudinit/config/ignition.go index 413a636c..2d9bdd50 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/config/ignition.go +++ b/vendor/github.com/coreos/coreos-cloudinit/config/ignition.go @@ -20,7 +20,10 @@ import ( func IsIgnitionConfig(userdata string) bool { var cfg struct { - Version *int `json:"ignitionVersion" yaml:"ignition_version"` + Version *int `json:"ignitionVersion"` + Ignition struct { + Version *string `json:"version"` + } `json:"ignition"` } - return (json.Unmarshal([]byte(userdata), &cfg) == nil && cfg.Version != nil) + return (json.Unmarshal([]byte(userdata), &cfg) == nil && (cfg.Version != nil || cfg.Ignition.Version != nil)) } diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/locksmith_test.go b/vendor/github.com/coreos/coreos-cloudinit/config/locksmith_test.go deleted file mode 100644 index a8c4eb57..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/config/locksmith_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "testing" -) - -func TestRebootWindowStart(t *testing.T) { - tests := []struct { - value string - - isValid bool - }{ - {value: "Sun 0:0", isValid: true}, - {value: "Sun 00:00", isValid: true}, - {value: "sUn 23:59", isValid: true}, - {value: "mon 0:0", isValid: true}, - {value: "tue 0:0", isValid: true}, - {value: "tues 0:0", isValid: false}, - {value: "wed 0:0", isValid: true}, - {value: "thu 0:0", isValid: true}, - {value: "thur 0:0", isValid: false}, - {value: "fri 0:0", isValid: true}, - {value: "sat 0:0", isValid: true}, - {value: "sat00:00", isValid: false}, - {value: "00:00", isValid: true}, - {value: "10:10", isValid: true}, - {value: "20:20", isValid: true}, - {value: "20:30", isValid: true}, - {value: "20:40", isValid: true}, - {value: "20:50", isValid: true}, - {value: "20:60", isValid: false}, - {value: "24:00", isValid: false}, - } - - for _, tt := range tests { - isValid := (nil == AssertStructValid(Locksmith{RebootWindowStart: tt.value})) - if tt.isValid != isValid { - t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid) - } - } -} - -func TestRebootWindowLength(t *testing.T) { - tests := []struct { - value string - - isValid bool - }{ - {value: "1h", isValid: true}, - {value: "1d", isValid: true}, - {value: "0d", isValid: true}, - {value: "0.5h", isValid: true}, - {value: "0.5.0h", isValid: false}, - } - - for _, tt := range tests { - isValid := (nil == AssertStructValid(Locksmith{RebootWindowLength: tt.value})) - if tt.isValid != isValid { - t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/unit_test.go b/vendor/github.com/coreos/coreos-cloudinit/config/unit_test.go deleted file mode 100644 index d94dd8ba..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/config/unit_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2014 CoreOS, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package config - -import ( - "testing" -) - -func TestCommandValid(t *testing.T) { - tests := []struct { - value string - - isValid bool - }{ - {value: "start", isValid: true}, - {value: "stop", isValid: true}, - {value: "restart", isValid: true}, - {value: "reload", isValid: true}, - {value: "try-restart", isValid: true}, - {value: "reload-or-restart", isValid: true}, - {value: "reload-or-try-restart", isValid: true}, - {value: "tryrestart", isValid: false}, - {value: "unknown", isValid: false}, - } - - for _, tt := range tests { - isValid := (nil == AssertStructValid(Unit{Command: tt.value})) - if tt.isValid != isValid { - t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/update_test.go b/vendor/github.com/coreos/coreos-cloudinit/config/update_test.go deleted file mode 100644 index 2b0851eb..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/config/update_test.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright 2014 CoreOS, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package config - -import ( - "testing" -) - -func TestRebootStrategyValid(t *testing.T) { - tests := []struct { - value string - - isValid bool - }{ - {value: "best-effort", isValid: true}, - {value: "etcd-lock", isValid: true}, - {value: "reboot", isValid: true}, - {value: "off", isValid: true}, - {value: "besteffort", isValid: false}, - {value: "unknown", isValid: false}, - } - - for _, tt := range tests { - isValid := (nil == AssertStructValid(Update{RebootStrategy: tt.value})) - if tt.isValid != isValid { - t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit.go b/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit.go deleted file mode 100644 index d82a72dd..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "bytes" - "compress/gzip" - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "runtime" - "sync" - "time" - - "github.com/coreos/coreos-cloudinit/config" - "github.com/coreos/coreos-cloudinit/config/validate" - "github.com/coreos/coreos-cloudinit/datasource" - "github.com/coreos/coreos-cloudinit/datasource/configdrive" - "github.com/coreos/coreos-cloudinit/datasource/file" - "github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma" - "github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean" - "github.com/coreos/coreos-cloudinit/datasource/metadata/ec2" - "github.com/coreos/coreos-cloudinit/datasource/metadata/packet" - "github.com/coreos/coreos-cloudinit/datasource/proc_cmdline" - "github.com/coreos/coreos-cloudinit/datasource/url" - "github.com/coreos/coreos-cloudinit/datasource/vmware" - "github.com/coreos/coreos-cloudinit/datasource/waagent" - "github.com/coreos/coreos-cloudinit/initialize" - "github.com/coreos/coreos-cloudinit/network" - "github.com/coreos/coreos-cloudinit/pkg" - "github.com/coreos/coreos-cloudinit/system" -) - -const ( - datasourceInterval = 100 * time.Millisecond - datasourceMaxInterval = 30 * time.Second - datasourceTimeout = 5 * time.Minute -) - -var ( - flags = struct { - printVersion bool - ignoreFailure bool - sources struct { - file string - configDrive string - waagent string - metadataService bool - ec2MetadataService string - cloudSigmaMetadataService bool - digitalOceanMetadataService string - packetMetadataService string - url string - procCmdLine bool - vmware bool - } - convertNetconf string - workspace string - sshKeyName string - oem string - validate bool - }{} - version = "was not built properly" -) - -func init() { - flag.BoolVar(&flags.printVersion, "version", false, "Print the version and exit") - flag.BoolVar(&flags.ignoreFailure, "ignore-failure", false, "Exits with 0 status in the event of malformed input from user-data") - flag.StringVar(&flags.sources.file, "from-file", "", "Read user-data from provided file") - flag.StringVar(&flags.sources.configDrive, "from-configdrive", "", "Read data from provided cloud-drive directory") - flag.StringVar(&flags.sources.waagent, "from-waagent", "", "Read data from provided waagent directory") - flag.BoolVar(&flags.sources.metadataService, "from-metadata-service", false, "[DEPRECATED - Use -from-ec2-metadata] Download data from metadata service") - flag.StringVar(&flags.sources.ec2MetadataService, "from-ec2-metadata", "", "Download EC2 data from the provided url") - flag.BoolVar(&flags.sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context") - flag.StringVar(&flags.sources.digitalOceanMetadataService, "from-digitalocean-metadata", "", "Download DigitalOcean data from the provided url") - flag.StringVar(&flags.sources.packetMetadataService, "from-packet-metadata", "", "Download Packet data from metadata service") - flag.StringVar(&flags.sources.url, "from-url", "", "Download user-data from provided url") - flag.BoolVar(&flags.sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=', using the cloud-config served by an HTTP GET to ", proc_cmdline.ProcCmdlineLocation, proc_cmdline.ProcCmdlineCloudConfigFlag)) - flag.BoolVar(&flags.sources.vmware, "from-vmware-guestinfo", false, "Read data from VMware guestinfo") - flag.StringVar(&flags.oem, "oem", "", "Use the settings specific to the provided OEM") - flag.StringVar(&flags.convertNetconf, "convert-netconf", "", "Read the network 
config provided in cloud-drive and translate it from the specified format into networkd unit files") - flag.StringVar(&flags.workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data") - flag.StringVar(&flags.sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name") - flag.BoolVar(&flags.validate, "validate", false, "[EXPERIMENTAL] Validate the user-data but do not apply it to the system") -} - -type oemConfig map[string]string - -var ( - oemConfigs = map[string]oemConfig{ - "digitalocean": oemConfig{ - "from-digitalocean-metadata": "http://169.254.169.254/", - "convert-netconf": "digitalocean", - }, - "ec2-compat": oemConfig{ - "from-ec2-metadata": "http://169.254.169.254/", - "from-configdrive": "/media/configdrive", - }, - "rackspace-onmetal": oemConfig{ - "from-configdrive": "/media/configdrive", - "convert-netconf": "debian", - }, - "azure": oemConfig{ - "from-waagent": "/var/lib/waagent", - }, - "cloudsigma": oemConfig{ - "from-cloudsigma-metadata": "true", - }, - "packet": oemConfig{ - "from-packet-metadata": "https://metadata.packet.net/", - }, - "vmware": oemConfig{ - "from-vmware-guestinfo": "true", - "convert-netconf": "vmware", - }, - } -) - -func main() { - failure := false - - // Conservative Go 1.5 upgrade strategy: - // keep GOMAXPROCS' default at 1 for now. - if os.Getenv("GOMAXPROCS") == "" { - runtime.GOMAXPROCS(1) - } - - flag.Parse() - - if c, ok := oemConfigs[flags.oem]; ok { - for k, v := range c { - flag.Set(k, v) - } - } else if flags.oem != "" { - oems := make([]string, 0, len(oemConfigs)) - for k := range oemConfigs { - oems = append(oems, k) - } - fmt.Printf("Invalid option to -oem: %q. Supported options: %q\n", flags.oem, oems) - os.Exit(2) - } - - if flags.printVersion == true { - fmt.Printf("coreos-cloudinit %s\n", version) - os.Exit(0) - } - - switch flags.convertNetconf { - case "": - case "debian": - case "digitalocean": - case "packet": - case "vmware": - default: - fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian, digitalocean, packet, vmware'\n", flags.convertNetconf) - os.Exit(2) - } - - dss := getDatasources() - if len(dss) == 0 { - fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-ec2-metadata, --from-cloudsigma-metadata, --from-packet-metadata, --from-digitalocean-metadata, --from-vmware-guestinfo, --from-waagent, --from-url or --from-proc-cmdline") - os.Exit(2) - } - - ds := selectDatasource(dss) - if ds == nil { - log.Println("No datasources available in time") - os.Exit(1) - } - - log.Printf("Fetching user-data from datasource of type %q\n", ds.Type()) - userdataBytes, err := ds.FetchUserdata() - if err != nil { - log.Printf("Failed fetching user-data from datasource: %v. Continuing...\n", err) - failure = true - } - userdataBytes, err = decompressIfGzip(userdataBytes) - if err != nil { - log.Printf("Failed decompressing user-data from datasource: %v. 
Continuing...\n", err) - failure = true - } - - if report, err := validate.Validate(userdataBytes); err == nil { - ret := 0 - for _, e := range report.Entries() { - log.Println(e) - ret = 1 - } - if flags.validate { - os.Exit(ret) - } - } else { - log.Printf("Failed while validating user_data (%q)\n", err) - if flags.validate { - os.Exit(1) - } - } - - log.Printf("Fetching meta-data from datasource of type %q\n", ds.Type()) - metadata, err := ds.FetchMetadata() - if err != nil { - log.Printf("Failed fetching meta-data from datasource: %v\n", err) - os.Exit(1) - } - - // Apply environment to user-data - env := initialize.NewEnvironment("/", ds.ConfigRoot(), flags.workspace, flags.sshKeyName, metadata) - userdata := env.Apply(string(userdataBytes)) - - var ccu *config.CloudConfig - var script *config.Script - switch ud, err := initialize.ParseUserData(userdata); err { - case initialize.ErrIgnitionConfig: - fmt.Printf("Detected an Ignition config. Exiting...") - os.Exit(0) - case nil: - switch t := ud.(type) { - case *config.CloudConfig: - ccu = t - case *config.Script: - script = t - } - default: - fmt.Printf("Failed to parse user-data: %v\nContinuing...\n", err) - failure = true - } - - log.Println("Merging cloud-config from meta-data and user-data") - cc := mergeConfigs(ccu, metadata) - - var ifaces []network.InterfaceGenerator - if flags.convertNetconf != "" { - var err error - switch flags.convertNetconf { - case "debian": - ifaces, err = network.ProcessDebianNetconf(metadata.NetworkConfig.([]byte)) - case "digitalocean": - ifaces, err = network.ProcessDigitalOceanNetconf(metadata.NetworkConfig.(digitalocean.Metadata)) - case "packet": - ifaces, err = network.ProcessPacketNetconf(metadata.NetworkConfig.(packet.NetworkData)) - case "vmware": - ifaces, err = network.ProcessVMwareNetconf(metadata.NetworkConfig.(map[string]string)) - default: - err = fmt.Errorf("Unsupported network config format %q", flags.convertNetconf) - } - if err != nil { - log.Printf("Failed to generate interfaces: %v\n", err) - os.Exit(1) - } - } - - if err = initialize.Apply(cc, ifaces, env); err != nil { - log.Printf("Failed to apply cloud-config: %v\n", err) - os.Exit(1) - } - - if script != nil { - if err = runScript(*script, env); err != nil { - log.Printf("Failed to run script: %v\n", err) - os.Exit(1) - } - } - - if failure && !flags.ignoreFailure { - os.Exit(1) - } -} - -// mergeConfigs merges certain options from md (meta-data from the datasource) -// onto cc (a CloudConfig derived from user-data), if they are not already set -// on cc (i.e. user-data always takes precedence) -func mergeConfigs(cc *config.CloudConfig, md datasource.Metadata) (out config.CloudConfig) { - if cc != nil { - out = *cc - } - - if md.Hostname != "" { - if out.Hostname != "" { - log.Printf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", out.Hostname, md.Hostname) - } else { - out.Hostname = md.Hostname - } - } - for _, key := range md.SSHPublicKeys { - out.SSHAuthorizedKeys = append(out.SSHAuthorizedKeys, key) - } - return -} - -// getDatasources creates a slice of possible Datasources for cloudinit based -// on the different source command-line flags. 
-func getDatasources() []datasource.Datasource { - dss := make([]datasource.Datasource, 0, 5) - if flags.sources.file != "" { - dss = append(dss, file.NewDatasource(flags.sources.file)) - } - if flags.sources.url != "" { - dss = append(dss, url.NewDatasource(flags.sources.url)) - } - if flags.sources.configDrive != "" { - dss = append(dss, configdrive.NewDatasource(flags.sources.configDrive)) - } - if flags.sources.metadataService { - dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress)) - } - if flags.sources.ec2MetadataService != "" { - dss = append(dss, ec2.NewDatasource(flags.sources.ec2MetadataService)) - } - if flags.sources.cloudSigmaMetadataService { - dss = append(dss, cloudsigma.NewServerContextService()) - } - if flags.sources.digitalOceanMetadataService != "" { - dss = append(dss, digitalocean.NewDatasource(flags.sources.digitalOceanMetadataService)) - } - if flags.sources.waagent != "" { - dss = append(dss, waagent.NewDatasource(flags.sources.waagent)) - } - if flags.sources.packetMetadataService != "" { - dss = append(dss, packet.NewDatasource(flags.sources.packetMetadataService)) - } - if flags.sources.procCmdLine { - dss = append(dss, proc_cmdline.NewDatasource()) - } - if flags.sources.vmware { - dss = append(dss, vmware.NewDatasource()) - } - return dss -} - -// selectDatasource attempts to choose a valid Datasource to use based on its -// current availability. The first Datasource to report to be available is -// returned. Datasources will be retried if possible if they are not -// immediately available. If all Datasources are permanently unavailable or -// datasourceTimeout is reached before one becomes available, nil is returned. -func selectDatasource(sources []datasource.Datasource) datasource.Datasource { - ds := make(chan datasource.Datasource) - stop := make(chan struct{}) - var wg sync.WaitGroup - - for _, s := range sources { - wg.Add(1) - go func(s datasource.Datasource) { - defer wg.Done() - - duration := datasourceInterval - for { - log.Printf("Checking availability of %q\n", s.Type()) - if s.IsAvailable() { - ds <- s - return - } else if !s.AvailabilityChanges() { - return - } - select { - case <-stop: - return - case <-time.After(duration): - duration = pkg.ExpBackoff(duration, datasourceMaxInterval) - } - } - }(s) - } - - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - var s datasource.Datasource - select { - case s = <-ds: - case <-done: - case <-time.After(datasourceTimeout): - } - - close(stop) - return s -} - -// TODO(jonboulle): this should probably be refactored and moved into a different module -func runScript(script config.Script, env *initialize.Environment) error { - err := initialize.PrepWorkspace(env.Workspace()) - if err != nil { - log.Printf("Failed preparing workspace: %v\n", err) - return err - } - path, err := initialize.PersistScriptInWorkspace(script, env.Workspace()) - if err == nil { - var name string - name, err = system.ExecuteScript(path) - initialize.PersistUnitNameInWorkspace(name, env.Workspace()) - } - return err -} - -const gzipMagicBytes = "\x1f\x8b" - -func decompressIfGzip(userdataBytes []byte) ([]byte, error) { - if !bytes.HasPrefix(userdataBytes, []byte(gzipMagicBytes)) { - return userdataBytes, nil - } - gzr, err := gzip.NewReader(bytes.NewReader(userdataBytes)) - if err != nil { - return nil, err - } - defer gzr.Close() - return ioutil.ReadAll(gzr) -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit_test.go 
b/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit_test.go deleted file mode 100644 index cd5c6da4..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bytes" - "encoding/base64" - "errors" - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" - "github.com/coreos/coreos-cloudinit/datasource" -) - -func TestMergeConfigs(t *testing.T) { - tests := []struct { - cc *config.CloudConfig - md datasource.Metadata - - out config.CloudConfig - }{ - { - // If md is empty and cc is nil, result should be empty - out: config.CloudConfig{}, - }, - { - // If md and cc are empty, result should be empty - cc: &config.CloudConfig{}, - out: config.CloudConfig{}, - }, - { - // If cc is empty, cc should be returned unchanged - cc: &config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"}, - out: config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"}, - }, - { - // If cc is empty, cc should be returned unchanged(overridden) - cc: &config.CloudConfig{}, - md: datasource.Metadata{Hostname: "md-host", SSHPublicKeys: map[string]string{"key": "ghi"}}, - out: config.CloudConfig{SSHAuthorizedKeys: []string{"ghi"}, Hostname: "md-host"}, - }, - { - // If cc is nil, cc should be returned unchanged(overridden) - md: datasource.Metadata{Hostname: "md-host", SSHPublicKeys: map[string]string{"key": "ghi"}}, - out: config.CloudConfig{SSHAuthorizedKeys: []string{"ghi"}, Hostname: "md-host"}, - }, - { - // user-data should override completely in the case of conflicts - cc: &config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"}, - md: datasource.Metadata{Hostname: "md-host"}, - out: config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"}, - }, - { - // Mixed merge should succeed - cc: &config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"}, - md: datasource.Metadata{Hostname: "md-host", SSHPublicKeys: map[string]string{"key": "ghi"}}, - out: config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def", "ghi"}, Hostname: "cc-host"}, - }, - { - // Completely non-conflicting merge should be fine - cc: &config.CloudConfig{Hostname: "cc-host"}, - md: datasource.Metadata{SSHPublicKeys: map[string]string{"zaphod": "beeblebrox"}}, - out: config.CloudConfig{Hostname: "cc-host", SSHAuthorizedKeys: []string{"beeblebrox"}}, - }, - { - // Non-mergeable settings in user-data should not be affected - cc: &config.CloudConfig{Hostname: "cc-host", ManageEtcHosts: config.EtcHosts("lolz")}, - md: datasource.Metadata{Hostname: "md-host"}, - out: config.CloudConfig{Hostname: "cc-host", ManageEtcHosts: config.EtcHosts("lolz")}, - }, - } - - for i, tt := range tests { - out := mergeConfigs(tt.cc, tt.md) - if !reflect.DeepEqual(tt.out, out) { - t.Errorf("bad config (%d): want %#v, got %#v", i, tt.out, out) - } - } 
-} - -func mustDecode(in string) []byte { - out, err := base64.StdEncoding.DecodeString(in) - if err != nil { - panic(err) - } - return out -} - -func TestDecompressIfGzip(t *testing.T) { - tests := []struct { - in []byte - - out []byte - err error - }{ - { - in: nil, - - out: nil, - err: nil, - }, - { - in: []byte{}, - - out: []byte{}, - err: nil, - }, - { - in: mustDecode("H4sIAJWV/VUAA1NOzskvTdFNzs9Ly0wHABt6mQENAAAA"), - - out: []byte("#cloud-config"), - err: nil, - }, - { - in: []byte("#cloud-config"), - - out: []byte("#cloud-config"), - err: nil, - }, - { - in: mustDecode("H4sCORRUPT=="), - - out: nil, - err: errors.New("any error"), - }, - } - for i, tt := range tests { - out, err := decompressIfGzip(tt.in) - if !bytes.Equal(out, tt.out) || (tt.err != nil && err == nil) { - t.Errorf("bad gzip (%d): want (%s, %#v), got (%s, %#v)", i, string(tt.out), tt.err, string(out), err) - } - } - -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive_test.go deleted file mode 100644 index e40a8fd5..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive_test.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
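The coreos-cloudinit_test.go deletion just above pinned down two behaviors of the deleted main package: mergeConfigs (user-data always wins over datasource metadata on conflicts, while SSH keys from both sources are combined) and decompressIfGzip (input is treated as gzip only when it starts with the two magic bytes 0x1f 0x8b; anything else passes through untouched). A self-contained round trip through a re-implementation of the gzip helper; the function body mirrors the one deleted earlier in this diff:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io/ioutil"
    )

    // decompressIfGzip returns data unmodified unless it carries the
    // gzip magic prefix, in which case it is decompressed.
    func decompressIfGzip(data []byte) ([]byte, error) {
        if !bytes.HasPrefix(data, []byte("\x1f\x8b")) {
            return data, nil
        }
        gzr, err := gzip.NewReader(bytes.NewReader(data))
        if err != nil {
            return nil, err
        }
        defer gzr.Close()
        return ioutil.ReadAll(gzr)
    }

    func main() {
        var buf bytes.Buffer
        zw := gzip.NewWriter(&buf)
        zw.Write([]byte("#cloud-config"))
        zw.Close()

        out, err := decompressIfGzip(buf.Bytes())
        fmt.Printf("%s %v\n", out, err) // #cloud-config <nil>

        out, _ = decompressIfGzip([]byte("#cloud-config"))
        fmt.Printf("%s\n", out) // passed through unchanged
    }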
- -package configdrive - -import ( - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/datasource" - "github.com/coreos/coreos-cloudinit/datasource/test" -) - -func TestFetchMetadata(t *testing.T) { - for _, tt := range []struct { - root string - files test.MockFilesystem - - metadata datasource.Metadata - }{ - { - root: "/", - files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: ""}), - }, - { - root: "/", - files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"ignore": "me"}`}), - }, - { - root: "/", - files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"hostname": "host"}`}), - metadata: datasource.Metadata{Hostname: "host"}, - }, - { - root: "/media/configdrive", - files: test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/meta_data.json", Contents: `{"hostname": "host", "network_config": {"content_path": "config_file.json"}, "public_keys":{"1": "key1", "2": "key2"}}`}, - test.File{Path: "/media/configdrive/openstack/config_file.json", Contents: "make it work"}, - ), - metadata: datasource.Metadata{ - Hostname: "host", - NetworkConfig: []byte("make it work"), - SSHPublicKeys: map[string]string{ - "1": "key1", - "2": "key2", - }, - }, - }, - } { - cd := configDrive{tt.root, tt.files.ReadFile} - metadata, err := cd.FetchMetadata() - if err != nil { - t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err) - } - if !reflect.DeepEqual(tt.metadata, metadata) { - t.Fatalf("bad metadata for %+v: want %#v, got %#v", tt, tt.metadata, metadata) - } - } -} - -func TestFetchUserdata(t *testing.T) { - for _, tt := range []struct { - root string - files test.MockFilesystem - - userdata string - }{ - { - "/", - test.NewMockFilesystem(), - "", - }, - { - "/", - test.NewMockFilesystem(test.File{Path: "/openstack/latest/user_data", Contents: "userdata"}), - "userdata", - }, - { - "/media/configdrive", - test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/user_data", Contents: "userdata"}), - "userdata", - }, - } { - cd := configDrive{tt.root, tt.files.ReadFile} - userdata, err := cd.FetchUserdata() - if err != nil { - t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err) - } - if string(userdata) != tt.userdata { - t.Fatalf("bad userdata for %+v: want %q, got %q", tt, tt.userdata, userdata) - } - } -} - -func TestConfigRoot(t *testing.T) { - for _, tt := range []struct { - root string - configRoot string - }{ - { - "/", - "/openstack", - }, - { - "/media/configdrive", - "/media/configdrive/openstack", - }, - } { - cd := configDrive{tt.root, nil} - if configRoot := cd.ConfigRoot(); configRoot != tt.configRoot { - t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot) - } - } -} - -func TestNewDatasource(t *testing.T) { - for _, tt := range []struct { - root string - expectRoot string - }{ - { - root: "", - expectRoot: "", - }, - { - root: "/media/configdrive", - expectRoot: "/media/configdrive", - }, - } { - service := NewDatasource(tt.root) - if service.root != tt.expectRoot { - t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata.go index fa6c605c..71982d0c 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata.go +++ 
b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata.go @@ -66,7 +66,7 @@ type metadataService struct { } func NewDatasource(root string) *metadataService { - return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath)} + return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath, nil)} } func (ms *metadataService) FetchMetadata() (metadata datasource.Metadata, err error) { diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata_test.go deleted file mode 100644 index fd8a0a0c..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata_test.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digitalocean - -import ( - "fmt" - "net" - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/datasource" - "github.com/coreos/coreos-cloudinit/datasource/metadata" - "github.com/coreos/coreos-cloudinit/datasource/metadata/test" - "github.com/coreos/coreos-cloudinit/pkg" -) - -func TestType(t *testing.T) { - want := "digitalocean-metadata-service" - if kind := (metadataService{}).Type(); kind != want { - t.Fatalf("bad type: want %q, got %q", want, kind) - } -} - -func TestFetchMetadata(t *testing.T) { - for _, tt := range []struct { - root string - metadataPath string - resources map[string]string - expect datasource.Metadata - clientErr error - expectErr error - }{ - { - root: "/", - metadataPath: "v1.json", - resources: map[string]string{ - "/v1.json": "bad", - }, - expectErr: fmt.Errorf("invalid character 'b' looking for beginning of value"), - }, - { - root: "/", - metadataPath: "v1.json", - resources: map[string]string{ - "/v1.json": `{ - "droplet_id": 1, - "user_data": "hello", - "vendor_data": "hello", - "public_keys": [ - "publickey1", - "publickey2" - ], - "region": "nyc2", - "interfaces": { - "public": [ - { - "ipv4": { - "ip_address": "192.168.1.2", - "netmask": "255.255.255.0", - "gateway": "192.168.1.1" - }, - "ipv6": { - "ip_address": "fe00::", - "cidr": 126, - "gateway": "fe00::" - }, - "mac": "ab:cd:ef:gh:ij", - "type": "public" - } - ] - } -}`, - }, - expect: datasource.Metadata{ - PublicIPv4: net.ParseIP("192.168.1.2"), - PublicIPv6: net.ParseIP("fe00::"), - SSHPublicKeys: map[string]string{ - "0": "publickey1", - "1": "publickey2", - }, - NetworkConfig: Metadata{ - Interfaces: Interfaces{ - Public: []Interface{ - Interface{ - IPv4: &Address{ - IPAddress: "192.168.1.2", - Netmask: "255.255.255.0", - Gateway: "192.168.1.1", - }, - IPv6: &Address{ - IPAddress: "fe00::", - Cidr: 126, - Gateway: "fe00::", - }, - MAC: "ab:cd:ef:gh:ij", - Type: "public", - }, - }, - }, - PublicKeys: []string{"publickey1", "publickey2"}, - }, - }, - }, - { - clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")}, - 
expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")}, - }, - } { - service := &metadataService{ - MetadataService: metadata.MetadataService{ - Root: tt.root, - Client: &test.HttpClient{Resources: tt.resources, Err: tt.clientErr}, - MetadataPath: tt.metadataPath, - }, - } - metadata, err := service.FetchMetadata() - if Error(err) != Error(tt.expectErr) { - t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err) - } - if !reflect.DeepEqual(tt.expect, metadata) { - t.Fatalf("bad fetch (%q): want %#q, got %#q", tt.resources, tt.expect, metadata) - } - } -} - -func Error(err error) string { - if err != nil { - return err.Error() - } - return "" -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata.go index 141fdc94..5489e1d3 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata.go +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata.go @@ -39,7 +39,7 @@ type metadataService struct { } func NewDatasource(root string) *metadataService { - return &metadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath)} + return &metadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath, nil)} } func (ms metadataService) FetchMetadata() (datasource.Metadata, error) { diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata_test.go deleted file mode 100644 index ba463c28..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata_test.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
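The recurring one-line change in this diff — metadata.NewDatasource growing a trailing http.Header parameter — is what makes the new GCE datasource (added below) possible: the header is handed to pkg.NewHttpClientHeader and attached to every metadata request. Datasources that need no extra headers, like the DigitalOcean and EC2 services above, simply pass nil. The call sites as they appear in this diff (argument names vary slightly per datasource):

    // before: no way to send custom headers with metadata requests
    metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath)

    // after: header-less datasources pass nil ...
    metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath, nil)

    // ... and the new GCE datasource supplies the header Google requires
    metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath,
        http.Header{"Metadata-Flavor": {"Google"}})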
- -package ec2 - -import ( - "fmt" - "net" - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/datasource" - "github.com/coreos/coreos-cloudinit/datasource/metadata" - "github.com/coreos/coreos-cloudinit/datasource/metadata/test" - "github.com/coreos/coreos-cloudinit/pkg" -) - -func TestType(t *testing.T) { - want := "ec2-metadata-service" - if kind := (metadataService{}).Type(); kind != want { - t.Fatalf("bad type: want %q, got %q", want, kind) - } -} - -func TestFetchAttributes(t *testing.T) { - for _, s := range []struct { - resources map[string]string - err error - tests []struct { - path string - val []string - } - }{ - { - resources: map[string]string{ - "/": "a\nb\nc/", - "/c/": "d\ne/", - "/c/e/": "f", - "/a": "1", - "/b": "2", - "/c/d": "3", - "/c/e/f": "4", - }, - tests: []struct { - path string - val []string - }{ - {"/", []string{"a", "b", "c/"}}, - {"/b", []string{"2"}}, - {"/c/d", []string{"3"}}, - {"/c/e/", []string{"f"}}, - }, - }, - { - err: fmt.Errorf("test error"), - tests: []struct { - path string - val []string - }{ - {"", nil}, - }, - }, - } { - service := metadataService{metadata.MetadataService{ - Client: &test.HttpClient{Resources: s.resources, Err: s.err}, - }} - for _, tt := range s.tests { - attrs, err := service.fetchAttributes(tt.path) - if err != s.err { - t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err) - } - if !reflect.DeepEqual(attrs, tt.val) { - t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attrs) - } - } - } -} - -func TestFetchAttribute(t *testing.T) { - for _, s := range []struct { - resources map[string]string - err error - tests []struct { - path string - val string - } - }{ - { - resources: map[string]string{ - "/": "a\nb\nc/", - "/c/": "d\ne/", - "/c/e/": "f", - "/a": "1", - "/b": "2", - "/c/d": "3", - "/c/e/f": "4", - }, - tests: []struct { - path string - val string - }{ - {"/a", "1"}, - {"/b", "2"}, - {"/c/d", "3"}, - {"/c/e/f", "4"}, - }, - }, - { - err: fmt.Errorf("test error"), - tests: []struct { - path string - val string - }{ - {"", ""}, - }, - }, - } { - service := metadataService{metadata.MetadataService{ - Client: &test.HttpClient{Resources: s.resources, Err: s.err}, - }} - for _, tt := range s.tests { - attr, err := service.fetchAttribute(tt.path) - if err != s.err { - t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err) - } - if attr != tt.val { - t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attr) - } - } - } -} - -func TestFetchMetadata(t *testing.T) { - for _, tt := range []struct { - root string - metadataPath string - resources map[string]string - expect datasource.Metadata - clientErr error - expectErr error - }{ - { - root: "/", - metadataPath: "2009-04-04/meta-data", - resources: map[string]string{ - "/2009-04-04/meta-data/public-keys": "bad\n", - }, - expectErr: fmt.Errorf("malformed public key: \"bad\""), - }, - { - root: "/", - metadataPath: "2009-04-04/meta-data", - resources: map[string]string{ - "/2009-04-04/meta-data/hostname": "host", - "/2009-04-04/meta-data/local-ipv4": "1.2.3.4", - "/2009-04-04/meta-data/public-ipv4": "5.6.7.8", - "/2009-04-04/meta-data/public-keys": "0=test1\n", - "/2009-04-04/meta-data/public-keys/0": "openssh-key", - "/2009-04-04/meta-data/public-keys/0/openssh-key": "key", - }, - expect: datasource.Metadata{ - Hostname: "host", - PrivateIPv4: net.ParseIP("1.2.3.4"), - PublicIPv4: net.ParseIP("5.6.7.8"), - SSHPublicKeys: map[string]string{"test1": 
"key"}, - }, - }, - { - root: "/", - metadataPath: "2009-04-04/meta-data", - resources: map[string]string{ - "/2009-04-04/meta-data/hostname": "host domain another_domain", - "/2009-04-04/meta-data/local-ipv4": "1.2.3.4", - "/2009-04-04/meta-data/public-ipv4": "5.6.7.8", - "/2009-04-04/meta-data/public-keys": "0=test1\n", - "/2009-04-04/meta-data/public-keys/0": "openssh-key", - "/2009-04-04/meta-data/public-keys/0/openssh-key": "key", - }, - expect: datasource.Metadata{ - Hostname: "host", - PrivateIPv4: net.ParseIP("1.2.3.4"), - PublicIPv4: net.ParseIP("5.6.7.8"), - SSHPublicKeys: map[string]string{"test1": "key"}, - }, - }, - { - clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")}, - expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")}, - }, - } { - service := &metadataService{metadata.MetadataService{ - Root: tt.root, - Client: &test.HttpClient{Resources: tt.resources, Err: tt.clientErr}, - MetadataPath: tt.metadataPath, - }} - metadata, err := service.FetchMetadata() - if Error(err) != Error(tt.expectErr) { - t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err) - } - if !reflect.DeepEqual(tt.expect, metadata) { - t.Fatalf("bad fetch (%q): want %#v, got %#v", tt.resources, tt.expect, metadata) - } - } -} - -func Error(err error) string { - if err != nil { - return err.Error() - } - return "" -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/gce/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/gce/metadata.go new file mode 100644 index 00000000..494741c7 --- /dev/null +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/gce/metadata.go @@ -0,0 +1,89 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gce + +import ( + "fmt" + "net" + "net/http" + + "github.com/coreos/coreos-cloudinit/datasource" + "github.com/coreos/coreos-cloudinit/datasource/metadata" +) + +const ( + apiVersion = "computeMetadata/v1/" + metadataPath = apiVersion + "instance/" + userdataPath = apiVersion + "instance/attributes/user-data" +) + +type metadataService struct { + metadata.MetadataService +} + +func NewDatasource(root string) *metadataService { + return &metadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath, http.Header{"Metadata-Flavor": {"Google"}})} +} + +func (ms metadataService) FetchMetadata() (datasource.Metadata, error) { + public, err := ms.fetchIP("network-interfaces/0/access-configs/0/external-ip") + if err != nil { + return datasource.Metadata{}, err + } + local, err := ms.fetchIP("network-interfaces/0/ip") + if err != nil { + return datasource.Metadata{}, err + } + hostname, err := ms.fetchString("hostname") + if err != nil { + return datasource.Metadata{}, err + } + + return datasource.Metadata{ + PublicIPv4: public, + PrivateIPv4: local, + Hostname: hostname, + }, nil +} + +func (ms metadataService) Type() string { + return "gce-metadata-service" +} + +func (ms metadataService) fetchString(key string) (string, error) { + data, err := ms.FetchData(ms.MetadataUrl() + key) + if err != nil { + return "", err + } + + return string(data), nil +} + +func (ms metadataService) fetchIP(key string) (net.IP, error) { + str, err := ms.fetchString(key) + if err != nil { + return nil, err + } + + if str == "" { + return nil, nil + } + + if ip := net.ParseIP(str); ip != nil { + return ip, nil + } else { + return nil, fmt.Errorf("couldn't parse %q as IP address", str) + } +} diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go index 2baa4f17..9d52e933 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go @@ -15,6 +15,7 @@ package metadata import ( + "net/http" "strings" "github.com/coreos/coreos-cloudinit/pkg" @@ -28,11 +29,11 @@ type MetadataService struct { MetadataPath string } -func NewDatasource(root, apiVersion, userdataPath, metadataPath string) MetadataService { +func NewDatasource(root, apiVersion, userdataPath, metadataPath string, header http.Header) MetadataService { if !strings.HasSuffix(root, "/") { root += "/" } - return MetadataService{root, pkg.NewHttpClient(), apiVersion, userdataPath, metadataPath} + return MetadataService{root, pkg.NewHttpClientHeader(header), apiVersion, userdataPath, metadataPath} } func (ms MetadataService) IsAvailable() bool { diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata_test.go deleted file mode 100644 index 9d6b4b29..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata_test.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "bytes" - "fmt" - "testing" - - "github.com/coreos/coreos-cloudinit/datasource/metadata/test" - "github.com/coreos/coreos-cloudinit/pkg" -) - -func TestAvailabilityChanges(t *testing.T) { - want := true - if ac := (MetadataService{}).AvailabilityChanges(); ac != want { - t.Fatalf("bad AvailabilityChanges: want %t, got %t", want, ac) - } -} - -func TestIsAvailable(t *testing.T) { - for _, tt := range []struct { - root string - apiVersion string - resources map[string]string - expect bool - }{ - { - root: "/", - apiVersion: "2009-04-04", - resources: map[string]string{ - "/2009-04-04": "", - }, - expect: true, - }, - { - root: "/", - resources: map[string]string{}, - expect: false, - }, - } { - service := &MetadataService{ - Root: tt.root, - Client: &test.HttpClient{Resources: tt.resources, Err: nil}, - ApiVersion: tt.apiVersion, - } - if a := service.IsAvailable(); a != tt.expect { - t.Fatalf("bad isAvailable (%q): want %t, got %t", tt.resources, tt.expect, a) - } - } -} - -func TestFetchUserdata(t *testing.T) { - for _, tt := range []struct { - root string - userdataPath string - resources map[string]string - userdata []byte - clientErr error - expectErr error - }{ - { - root: "/", - userdataPath: "2009-04-04/user-data", - resources: map[string]string{ - "/2009-04-04/user-data": "hello", - }, - userdata: []byte("hello"), - }, - { - root: "/", - clientErr: pkg.ErrNotFound{Err: fmt.Errorf("test not found error")}, - userdata: []byte{}, - }, - { - root: "/", - clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test timeout error")}, - expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test timeout error")}, - }, - } { - service := &MetadataService{ - Root: tt.root, - Client: &test.HttpClient{Resources: tt.resources, Err: tt.clientErr}, - UserdataPath: tt.userdataPath, - } - data, err := service.FetchUserdata() - if Error(err) != Error(tt.expectErr) { - t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err) - } - if !bytes.Equal(data, tt.userdata) { - t.Fatalf("bad userdata (%q): want %q, got %q", tt.resources, tt.userdata, data) - } - } -} - -func TestUrls(t *testing.T) { - for _, tt := range []struct { - root string - userdataPath string - metadataPath string - expectRoot string - userdata string - metadata string - }{ - { - root: "/", - userdataPath: "2009-04-04/user-data", - metadataPath: "2009-04-04/meta-data", - expectRoot: "/", - userdata: "/2009-04-04/user-data", - metadata: "/2009-04-04/meta-data", - }, - { - root: "http://169.254.169.254/", - userdataPath: "2009-04-04/user-data", - metadataPath: "2009-04-04/meta-data", - expectRoot: "http://169.254.169.254/", - userdata: "http://169.254.169.254/2009-04-04/user-data", - metadata: "http://169.254.169.254/2009-04-04/meta-data", - }, - } { - service := &MetadataService{ - Root: tt.root, - UserdataPath: tt.userdataPath, - MetadataPath: tt.metadataPath, - } - if url := service.UserdataUrl(); url != tt.userdata { - t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.userdata, url) - } - if url := service.MetadataUrl(); url != tt.metadata { - t.Fatalf("bad url (%q): want %q, got %q", 
tt.root, tt.metadata, url) - } - if url := service.ConfigRoot(); url != tt.expectRoot { - t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.expectRoot, url) - } - } -} - -func TestNewDatasource(t *testing.T) { - for _, tt := range []struct { - root string - expectRoot string - }{ - { - root: "", - expectRoot: "/", - }, - { - root: "/", - expectRoot: "/", - }, - { - root: "http://169.254.169.254", - expectRoot: "http://169.254.169.254/", - }, - { - root: "http://169.254.169.254/", - expectRoot: "http://169.254.169.254/", - }, - } { - service := NewDatasource(tt.root, "", "", "") - if service.Root != tt.expectRoot { - t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.Root) - } - } -} - -func Error(err error) string { - if err != nil { - return err.Error() - } - return "" -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/packet/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/packet/metadata.go index 3c7d499d..19d3931a 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/packet/metadata.go +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/packet/metadata.go @@ -62,7 +62,7 @@ type metadataService struct { } func NewDatasource(root string) *metadataService { - return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath)} + return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath, nil)} } func (ms *metadataService) FetchMetadata() (metadata datasource.Metadata, err error) { diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline_test.go deleted file mode 100644 index a0245812..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
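The deleted tests below fix the contract for scanning the kernel command line: both the cloud-config-url= and cloud_config_url= spellings are honored, flags without a value are ignored, and when the flag appears several times the last usable value wins. A standalone re-implementation consistent with those cases — findCloudConfigURL is the deleted function's name, but this body is a sketch, not the original code:

    package main

    import (
        "fmt"
        "strings"
    )

    // findCloudConfigURL scans a kernel command line for the last
    // usable cloud-config URL, accepting both flag spellings.
    func findCloudConfigURL(cmdline string) (url string) {
        for _, token := range strings.Fields(cmdline) {
            parts := strings.SplitN(token, "=", 2)
            if len(parts) != 2 || parts[1] == "" {
                continue // bare flag or empty value
            }
            if parts[0] == "cloud-config-url" || parts[0] == "cloud_config_url" {
                url = parts[1] // later occurrences override earlier ones
            }
        }
        return
    }

    func main() {
        fmt.Println(findCloudConfigURL(
            "foo=bar cloud-config-url=one.example.com cloud-config-url=two.example.com"))
        // two.example.com
    }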
- -package proc_cmdline - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" -) - -func TestParseCmdlineCloudConfigFound(t *testing.T) { - tests := []struct { - input string - expect string - }{ - { - "cloud-config-url=example.com", - "example.com", - }, - { - "cloud_config_url=example.com", - "example.com", - }, - { - "cloud-config-url cloud-config-url=example.com", - "example.com", - }, - { - "cloud-config-url= cloud-config-url=example.com", - "example.com", - }, - { - "cloud-config-url=one.example.com cloud-config-url=two.example.com", - "two.example.com", - }, - { - "foo=bar cloud-config-url=example.com ping=pong", - "example.com", - }, - } - - for i, tt := range tests { - output, err := findCloudConfigURL(tt.input) - if output != tt.expect { - t.Errorf("Test case %d failed: %s != %s", i, output, tt.expect) - } - if err != nil { - t.Errorf("Test case %d produced error: %v", i, err) - } - } -} - -func TestProcCmdlineAndFetchConfig(t *testing.T) { - - var ( - ProcCmdlineTmpl = "foo=bar cloud-config-url=%s/config\n" - CloudConfigContent = "#cloud-config\n" - ) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" && r.RequestURI == "/config" { - fmt.Fprint(w, CloudConfigContent) - } - })) - defer ts.Close() - - file, err := ioutil.TempFile(os.TempDir(), "test_proc_cmdline") - defer os.Remove(file.Name()) - if err != nil { - t.Errorf("Test produced error: %v", err) - } - _, err = file.Write([]byte(fmt.Sprintf(ProcCmdlineTmpl, ts.URL))) - if err != nil { - t.Errorf("Test produced error: %v", err) - } - - p := NewDatasource() - p.Location = file.Name() - cfg, err := p.FetchUserdata() - if err != nil { - t.Errorf("Test produced error: %v", err) - } - - if string(cfg) != CloudConfigContent { - t.Errorf("Test failed, response body: %s != %s", cfg, CloudConfigContent) - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go deleted file mode 100644 index 94a47fa3..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package initialize - -import ( - "errors" - "fmt" - "log" - "path" - - "github.com/coreos/coreos-cloudinit/config" - "github.com/coreos/coreos-cloudinit/network" - "github.com/coreos/coreos-cloudinit/system" -) - -// CloudConfigFile represents a CoreOS specific configuration option that can generate -// an associated system.File to be written to disk -type CloudConfigFile interface { - // File should either return (*system.File, error), or (nil, nil) if nothing - // needs to be done for this configuration option. 
- File() (*system.File, error) -} - -// CloudConfigUnit represents a CoreOS specific configuration option that can generate -// associated system.Units to be created/enabled appropriately -type CloudConfigUnit interface { - Units() []system.Unit -} - -// Apply renders a CloudConfig to an Environment. This can involve things like -// configuring the hostname, adding new users, writing various configuration -// files to disk, and manipulating systemd services. -func Apply(cfg config.CloudConfig, ifaces []network.InterfaceGenerator, env *Environment) error { - if cfg.Hostname != "" { - if err := system.SetHostname(cfg.Hostname); err != nil { - return err - } - log.Printf("Set hostname to %s", cfg.Hostname) - } - - for _, user := range cfg.Users { - if user.Name == "" { - log.Printf("User object has no 'name' field, skipping") - continue - } - - if system.UserExists(&user) { - log.Printf("User '%s' exists, ignoring creation-time fields", user.Name) - if user.PasswordHash != "" { - log.Printf("Setting '%s' user's password", user.Name) - if err := system.SetUserPassword(user.Name, user.PasswordHash); err != nil { - log.Printf("Failed setting '%s' user's password: %v", user.Name, err) - return err - } - } - } else { - log.Printf("Creating user '%s'", user.Name) - if err := system.CreateUser(&user); err != nil { - log.Printf("Failed creating user '%s': %v", user.Name, err) - return err - } - } - - if len(user.SSHAuthorizedKeys) > 0 { - log.Printf("Authorizing %d SSH keys for user '%s'", len(user.SSHAuthorizedKeys), user.Name) - if err := system.AuthorizeSSHKeys(user.Name, env.SSHKeyName(), user.SSHAuthorizedKeys); err != nil { - return err - } - } - if user.SSHImportGithubUser != "" { - log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", user.SSHImportGithubUser, user.Name) - if err := SSHImportGithubUser(user.Name, user.SSHImportGithubUser); err != nil { - return err - } - } - for _, u := range user.SSHImportGithubUsers { - log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", u, user.Name) - if err := SSHImportGithubUser(user.Name, u); err != nil { - return err - } - } - if user.SSHImportURL != "" { - log.Printf("Authorizing SSH keys for CoreOS user '%s' from '%s'", user.Name, user.SSHImportURL) - if err := SSHImportKeysFromURL(user.Name, user.SSHImportURL); err != nil { - return err - } - } - } - - if len(cfg.SSHAuthorizedKeys) > 0 { - err := system.AuthorizeSSHKeys("core", env.SSHKeyName(), cfg.SSHAuthorizedKeys) - if err == nil { - log.Printf("Authorized SSH keys for core user") - } else { - return err - } - } - - var writeFiles []system.File - for _, file := range cfg.WriteFiles { - writeFiles = append(writeFiles, system.File{File: file}) - } - - for _, ccf := range []CloudConfigFile{ - system.OEM{OEM: cfg.CoreOS.OEM}, - system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig}, - system.EtcHosts{EtcHosts: cfg.ManageEtcHosts}, - system.Flannel{Flannel: cfg.CoreOS.Flannel}, - } { - f, err := ccf.File() - if err != nil { - return err - } - if f != nil { - writeFiles = append(writeFiles, *f) - } - } - - var units []system.Unit - for _, u := range cfg.CoreOS.Units { - units = append(units, system.Unit{Unit: u}) - } - - for _, ccu := range []CloudConfigUnit{ - system.Etcd{Etcd: cfg.CoreOS.Etcd}, - system.Etcd2{Etcd2: cfg.CoreOS.Etcd2}, - system.Fleet{Fleet: cfg.CoreOS.Fleet}, - system.Locksmith{Locksmith: cfg.CoreOS.Locksmith}, - system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig}, - } { - units = append(units, 
ccu.Units()...) - } - - wroteEnvironment := false - for _, file := range writeFiles { - fullPath, err := system.WriteFile(&file, env.Root()) - if err != nil { - return err - } - if path.Clean(file.Path) == "/etc/environment" { - wroteEnvironment = true - } - log.Printf("Wrote file %s to filesystem", fullPath) - } - - if !wroteEnvironment { - ef := env.DefaultEnvironmentFile() - if ef != nil { - err := system.WriteEnvFile(ef, env.Root()) - if err != nil { - return err - } - log.Printf("Updated /etc/environment") - } - } - - if len(ifaces) > 0 { - units = append(units, createNetworkingUnits(ifaces)...) - if err := system.RestartNetwork(ifaces); err != nil { - return err - } - } - - um := system.NewUnitManager(env.Root()) - return processUnits(units, env.Root(), um) -} - -func createNetworkingUnits(interfaces []network.InterfaceGenerator) (units []system.Unit) { - appendNewUnit := func(units []system.Unit, name, content string) []system.Unit { - if content == "" { - return units - } - return append(units, system.Unit{Unit: config.Unit{ - Name: name, - Runtime: true, - Content: content, - }}) - } - for _, i := range interfaces { - units = appendNewUnit(units, fmt.Sprintf("%s.netdev", i.Filename()), i.Netdev()) - units = appendNewUnit(units, fmt.Sprintf("%s.link", i.Filename()), i.Link()) - units = appendNewUnit(units, fmt.Sprintf("%s.network", i.Filename()), i.Network()) - } - return units -} - -// processUnits takes a set of Units and applies them to the given root using -// the given UnitManager. This can involve things like writing unit files to -// disk, masking/unmasking units, or invoking systemd -// commands against units. It returns any error encountered. -func processUnits(units []system.Unit, root string, um system.UnitManager) error { - type action struct { - unit system.Unit - command string - } - actions := make([]action, 0, len(units)) - reload := false - restartNetworkd := false - for _, unit := range units { - if unit.Name == "" { - log.Printf("Skipping unit without name") - continue - } - - if unit.Content != "" { - log.Printf("Writing unit %q to filesystem", unit.Name) - if err := um.PlaceUnit(unit); err != nil { - return err - } - log.Printf("Wrote unit %q", unit.Name) - reload = true - } - - for _, dropin := range unit.DropIns { - if dropin.Name != "" && dropin.Content != "" { - log.Printf("Writing drop-in unit %q to filesystem", dropin.Name) - if err := um.PlaceUnitDropIn(unit, dropin); err != nil { - return err - } - log.Printf("Wrote drop-in unit %q", dropin.Name) - reload = true - } - } - - if unit.Mask { - log.Printf("Masking unit file %q", unit.Name) - if err := um.MaskUnit(unit); err != nil { - return err - } - } else if unit.Runtime { - log.Printf("Ensuring runtime unit file %q is unmasked", unit.Name) - if err := um.UnmaskUnit(unit); err != nil { - return err - } - } - - if unit.Enable { - if unit.Group() != "network" { - log.Printf("Enabling unit file %q", unit.Name) - if err := um.EnableUnitFile(unit); err != nil { - return err - } - log.Printf("Enabled unit %q", unit.Name) - } else { - log.Printf("Skipping enable for network-like unit %q", unit.Name) - } - } - - if unit.Group() == "network" { - restartNetworkd = true - } else if unit.Command != "" { - actions = append(actions, action{unit, unit.Command}) - } - } - - if reload { - if err := um.DaemonReload(); err != nil { - return errors.New(fmt.Sprintf("failed systemd daemon-reload: %s", err)) - } - } - - if restartNetworkd { - log.Printf("Restarting systemd-networkd") - networkd := system.Unit{Unit: 
config.Unit{Name: "systemd-networkd.service"}} - res, err := um.RunUnitCommand(networkd, "restart") - if err != nil { - return err - } - log.Printf("Restarted systemd-networkd (%s)", res) - } - - for _, action := range actions { - log.Printf("Calling unit command %q on %q'", action.command, action.unit.Name) - res, err := um.RunUnitCommand(action.unit, action.command) - if err != nil { - return err - } - log.Printf("Result of %q on %q: %s", action.command, action.unit.Name, res) - } - - return nil -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/config_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/config_test.go deleted file mode 100644 index 33be737e..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/initialize/config_test.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package initialize - -import ( - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" - "github.com/coreos/coreos-cloudinit/network" - "github.com/coreos/coreos-cloudinit/system" -) - -type TestUnitManager struct { - placed []string - enabled []string - masked []string - unmasked []string - commands []UnitAction - reload bool -} - -type UnitAction struct { - unit string - command string -} - -func (tum *TestUnitManager) PlaceUnit(u system.Unit) error { - tum.placed = append(tum.placed, u.Name) - return nil -} -func (tum *TestUnitManager) PlaceUnitDropIn(u system.Unit, d config.UnitDropIn) error { - tum.placed = append(tum.placed, u.Name+".d/"+d.Name) - return nil -} -func (tum *TestUnitManager) EnableUnitFile(u system.Unit) error { - tum.enabled = append(tum.enabled, u.Name) - return nil -} -func (tum *TestUnitManager) RunUnitCommand(u system.Unit, c string) (string, error) { - tum.commands = append(tum.commands, UnitAction{u.Name, c}) - return "", nil -} -func (tum *TestUnitManager) DaemonReload() error { - tum.reload = true - return nil -} -func (tum *TestUnitManager) MaskUnit(u system.Unit) error { - tum.masked = append(tum.masked, u.Name) - return nil -} -func (tum *TestUnitManager) UnmaskUnit(u system.Unit) error { - tum.unmasked = append(tum.unmasked, u.Name) - return nil -} - -type mockInterface struct { - name string - filename string - netdev string - link string - network string - kind string - modprobeParams string -} - -func (i mockInterface) Name() string { - return i.name -} - -func (i mockInterface) Filename() string { - return i.filename -} - -func (i mockInterface) Netdev() string { - return i.netdev -} - -func (i mockInterface) Link() string { - return i.link -} - -func (i mockInterface) Network() string { - return i.network -} - -func (i mockInterface) Type() string { - return i.kind -} - -func (i mockInterface) ModprobeParams() string { - return i.modprobeParams -} - -func TestCreateNetworkingUnits(t *testing.T) { - for _, tt := range []struct { - interfaces []network.InterfaceGenerator - expect []system.Unit - }{ - {nil, nil}, - { - []network.InterfaceGenerator{ - 
network.InterfaceGenerator(mockInterface{filename: "test"}), - }, - nil, - }, - { - []network.InterfaceGenerator{ - network.InterfaceGenerator(mockInterface{filename: "test1", netdev: "test netdev"}), - network.InterfaceGenerator(mockInterface{filename: "test2", link: "test link"}), - network.InterfaceGenerator(mockInterface{filename: "test3", network: "test network"}), - }, - []system.Unit{ - system.Unit{Unit: config.Unit{Name: "test1.netdev", Runtime: true, Content: "test netdev"}}, - system.Unit{Unit: config.Unit{Name: "test2.link", Runtime: true, Content: "test link"}}, - system.Unit{Unit: config.Unit{Name: "test3.network", Runtime: true, Content: "test network"}}, - }, - }, - { - []network.InterfaceGenerator{ - network.InterfaceGenerator(mockInterface{filename: "test", netdev: "test netdev", link: "test link", network: "test network"}), - }, - []system.Unit{ - system.Unit{Unit: config.Unit{Name: "test.netdev", Runtime: true, Content: "test netdev"}}, - system.Unit{Unit: config.Unit{Name: "test.link", Runtime: true, Content: "test link"}}, - system.Unit{Unit: config.Unit{Name: "test.network", Runtime: true, Content: "test network"}}, - }, - }, - } { - units := createNetworkingUnits(tt.interfaces) - if !reflect.DeepEqual(tt.expect, units) { - t.Errorf("bad units (%+v): want %#v, got %#v", tt.interfaces, tt.expect, units) - } - } -} - -func TestProcessUnits(t *testing.T) { - tests := []struct { - units []system.Unit - - result TestUnitManager - }{ - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "foo", - Mask: true, - }}, - }, - result: TestUnitManager{ - masked: []string{"foo"}, - }, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "baz.service", - Content: "[Service]\nExecStart=/bin/baz", - Command: "start", - }}, - system.Unit{Unit: config.Unit{ - Name: "foo.network", - Content: "[Network]\nFoo=true", - }}, - system.Unit{Unit: config.Unit{ - Name: "bar.network", - Content: "[Network]\nBar=true", - }}, - }, - result: TestUnitManager{ - placed: []string{"baz.service", "foo.network", "bar.network"}, - commands: []UnitAction{ - UnitAction{"systemd-networkd.service", "restart"}, - UnitAction{"baz.service", "start"}, - }, - reload: true, - }, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "baz.service", - Content: "[Service]\nExecStart=/bin/true", - }}, - }, - result: TestUnitManager{ - placed: []string{"baz.service"}, - reload: true, - }, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "locksmithd.service", - Runtime: true, - }}, - }, - result: TestUnitManager{ - unmasked: []string{"locksmithd.service"}, - }, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "woof", - Enable: true, - }}, - }, - result: TestUnitManager{ - enabled: []string{"woof"}, - }, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "hi.service", - Runtime: true, - Content: "[Service]\nExecStart=/bin/echo hi", - DropIns: []config.UnitDropIn{ - { - Name: "lo.conf", - Content: "[Service]\nExecStart=/bin/echo lo", - }, - { - Name: "bye.conf", - Content: "[Service]\nExecStart=/bin/echo bye", - }, - }, - }}, - }, - result: TestUnitManager{ - placed: []string{"hi.service", "hi.service.d/lo.conf", "hi.service.d/bye.conf"}, - unmasked: []string{"hi.service"}, - reload: true, - }, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - DropIns: []config.UnitDropIn{ - { - Name: "lo.conf", - Content: "[Service]\nExecStart=/bin/echo lo", - }, - }, - }}, - }, - result: 
TestUnitManager{}, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "hi.service", - DropIns: []config.UnitDropIn{ - { - Content: "[Service]\nExecStart=/bin/echo lo", - }, - }, - }}, - }, - result: TestUnitManager{}, - }, - { - units: []system.Unit{ - system.Unit{Unit: config.Unit{ - Name: "hi.service", - DropIns: []config.UnitDropIn{ - { - Name: "lo.conf", - }, - }, - }}, - }, - result: TestUnitManager{}, - }, - } - - for _, tt := range tests { - tum := &TestUnitManager{} - if err := processUnits(tt.units, "", tum); err != nil { - t.Errorf("bad error (%+v): want nil, got %s", tt.units, err) - } - if !reflect.DeepEqual(tt.result, *tum) { - t.Errorf("bad result (%+v): want %+v, got %+v", tt.units, tt.result, tum) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/env_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/env_test.go deleted file mode 100644 index abb770cf..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/initialize/env_test.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package initialize - -import ( - "io/ioutil" - "net" - "os" - "path" - "testing" - - "github.com/coreos/coreos-cloudinit/datasource" - "github.com/coreos/coreos-cloudinit/system" -) - -func TestEnvironmentApply(t *testing.T) { - os.Setenv("COREOS_PUBLIC_IPV4", "1.2.3.4") - os.Setenv("COREOS_PRIVATE_IPV4", "5.6.7.8") - os.Setenv("COREOS_PUBLIC_IPV6", "1234::") - os.Setenv("COREOS_PRIVATE_IPV6", "5678::") - for _, tt := range []struct { - metadata datasource.Metadata - input string - out string - }{ - { - // Substituting both values directly should always take precedence - // over environment variables - datasource.Metadata{ - PublicIPv4: net.ParseIP("192.0.2.3"), - PrivateIPv4: net.ParseIP("192.0.2.203"), - PublicIPv6: net.ParseIP("fe00:1234::"), - PrivateIPv6: net.ParseIP("fe00:5678::"), - }, - `[Service] -ExecStart=/usr/bin/echo "$public_ipv4 $public_ipv6" -ExecStop=/usr/bin/echo $private_ipv4 $private_ipv6 -ExecStop=/usr/bin/echo $unknown`, - `[Service] -ExecStart=/usr/bin/echo "192.0.2.3 fe00:1234::" -ExecStop=/usr/bin/echo 192.0.2.203 fe00:5678:: -ExecStop=/usr/bin/echo $unknown`, - }, - { - // Substituting one value directly while falling back with the other - datasource.Metadata{ - PrivateIPv4: net.ParseIP("127.0.0.1"), - }, - "$private_ipv4\n$public_ipv4", - "127.0.0.1\n1.2.3.4", - }, - { - // Falling back to environment variables for both values - datasource.Metadata{}, - "$private_ipv4\n$public_ipv4", - "5.6.7.8\n1.2.3.4", - }, - { - // No substitutions - datasource.Metadata{}, - "$private_ipv4\nfoobar", - "5.6.7.8\nfoobar", - }, - { - // Escaping substitutions - datasource.Metadata{ - PrivateIPv4: net.ParseIP("127.0.0.1"), - }, - `\$private_ipv4 -$private_ipv4 -addr: \$private_ipv4 -\\$private_ipv4`, - `$private_ipv4 -127.0.0.1 -addr: $private_ipv4 -\$private_ipv4`, - }, - { - // No substitutions with escaping - datasource.Metadata{}, - "\\$test\n$test", - 
"\\$test\n$test", - }, - } { - - env := NewEnvironment("./", "./", "./", "", tt.metadata) - got := env.Apply(tt.input) - if got != tt.out { - t.Fatalf("Environment incorrectly applied.\ngot:\n%s\nwant:\n%s", got, tt.out) - } - } -} - -func TestEnvironmentFile(t *testing.T) { - metadata := datasource.Metadata{ - PublicIPv4: net.ParseIP("1.2.3.4"), - PrivateIPv4: net.ParseIP("5.6.7.8"), - PublicIPv6: net.ParseIP("1234::"), - PrivateIPv6: net.ParseIP("5678::"), - } - expect := "COREOS_PRIVATE_IPV4=5.6.7.8\nCOREOS_PRIVATE_IPV6=5678::\nCOREOS_PUBLIC_IPV4=1.2.3.4\nCOREOS_PUBLIC_IPV6=1234::\n" - - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - env := NewEnvironment("./", "./", "./", "", metadata) - ef := env.DefaultEnvironmentFile() - err = system.WriteEnvFile(ef, dir) - if err != nil { - t.Fatalf("WriteEnvFile failed: %v", err) - } - - fullPath := path.Join(dir, "etc", "environment") - contents, err := ioutil.ReadFile(fullPath) - if err != nil { - t.Fatalf("Unable to read expected file: %v", err) - } - - if string(contents) != expect { - t.Fatalf("File has incorrect contents: %q", contents) - } -} - -func TestEnvironmentFileNil(t *testing.T) { - os.Clearenv() - metadata := datasource.Metadata{} - - env := NewEnvironment("./", "./", "./", "", metadata) - ef := env.DefaultEnvironmentFile() - if ef != nil { - t.Fatalf("Environment file not nil: %v", ef) - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys_test.go deleted file mode 100644 index 86395797..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package initialize - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" -) - -func TestCloudConfigUsersUrlMarshal(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - gh_res := ` -[ - { - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIHAu822ggSkIHrJYvhmBceOSVjuflfQm8RbMMDNVe9relQfuPbN+nxGGTCKzPLebeOcX+Wwi77TPXWwK3BZMglfXxhABlFPsuMb63Tqp94pBYsJdx/iFj9iGo6pKoM1k8ubOcqsUnq+BR9895zRbE7MjdwkGo67+QhCEwvkwAnNAAAAFQCuddVqXLCubzqnWmeHLQE+2GFfHwAAAIBnlXW5h15ndVuwi0htF4oodVSB1KwnTWcuBK+aE1zRs76yvRb0Ws+oifumThDwB/Tec6FQuAfRKfy6piChZqsu5KvL98I+2t5yyi1td+kMvdTnVL2lW44etDKseOcozmknCOmh4Dqvhl/2MwrDAhlPaN08EEq9h3w3mXtNLWH64QAAAIBAzDOKr17llngaKIdDXh+LtXKh87+zfjlTA36/9r2uF2kYE5uApDtu9sPCkt7+YBQt7R8prADPckwAiXwVdk0xijIOpLDBmoydQJJRQ+zTMxvpQmUr/1kUOv0zb+lB657CgvN0vVTmP2swPeMvgntt3C4vw7Ab+O+MS9peOAJbbQ==" - }, - { - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBANxpzIbTzKTeBRaOIdUxwwGwvDasTfU/PonhbNIuhYjc+xFGvBRTumox2F+luVAKKs4WdvA4nJXaY1OFi6DZftk5Bp4E2JaSzp8ulAzHsMexDdv6LGHGEJj/qdHAL1vHk2K89PpwRFSRZI8XRBLjvkr4ZgBKLG5ZILXPJEPP2j3lAAAAFQCtxoTnV8wy0c4grcGrQ+1sCsD7WQAAAIAqZsW2GviMe1RQrbZT0xAZmI64XRPrnLsoLxycHWlS7r6uUln2c6Ae2MB/YF0d4Kd1XZii9GHj7rrypqEo7MW8uSabhu70nmu1J8m2O3Dsr+4oJLeat9vwPsJV92IKO0jQwjKnAOHOiB9JKGeCw+NfXfogbti9/q38Q6XcS+SI5wAAAIEA1803Y2h+tOOpZXAsNIwl9mRfExWzLQ3L7knwJdznQu/6SW1H/1oyoYLebuk187Qj2UFI5qQ6AZNc49DvohWx0Cg6ABcyubNyoaCjZKWIdxVnItHWNbLe//+tyTu0I2eQwJOORsEPK5gMpf599C7wXQ//DzZOWbTWiHEX52gCTmk=" - }, - { - "id": 5224438, - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAPKRWdKhzGZuLAJL6M1eM51hWViMqNBC2C6lm2OqGRYLuIf1GJ391widUuSf4wQqnkR22Q9PCmAZ19XCf11wBRMnuw9I/Z3Bt5bXfc+dzFBCmHYGJ6wNSv++H9jxyMb+usmsenWOFZGNO2jN0wrJ4ay8Yt0bwtRU+VCXpuRLszMzAAAAFQDZUIuPjcfK5HLgnwZ/J3lvtvlUjQAAAIEApIkAwLuCQV5j3U6DmI/Y6oELqSUR2purFm8jo8jePFfe1t+ghikgD254/JXlhDCVgY0NLXcak+coJfGCTT23quJ7I5xdpTn/OZO2Q6Woum/bijFC/UWwQbLz0R2nU3DoHv5v6XHQZxuIG4Fsxa91S+vWjZFtI7RuYlBCZA//ANMAAACBAJO0FojzkX6IeaWLqrgu9GTkFwGFazZ+LPH5JOWPoPn1hQKuR32Uf6qNcBZcIjY7SF0P7HF5rLQd6zKZzHqqQQ92MV555NEwjsnJglYU8CaaZsfYooaGPgA1YN7RhTSAuDmUW5Hyfj5BH4NTtrzrvJxIhDoQLf31Fasjw00r4R0O" - } -] -` - fmt.Fprintln(w, gh_res) - })) - defer ts.Close() - - keys, err := fetchUserKeys(ts.URL) - if err != nil { - t.Fatalf("Encountered unexpected error: %v", err) - } - expected := "ssh-dss AAAAB3NzaC1kc3MAAACBAIHAu822ggSkIHrJYvhmBceOSVjuflfQm8RbMMDNVe9relQfuPbN+nxGGTCKzPLebeOcX+Wwi77TPXWwK3BZMglfXxhABlFPsuMb63Tqp94pBYsJdx/iFj9iGo6pKoM1k8ubOcqsUnq+BR9895zRbE7MjdwkGo67+QhCEwvkwAnNAAAAFQCuddVqXLCubzqnWmeHLQE+2GFfHwAAAIBnlXW5h15ndVuwi0htF4oodVSB1KwnTWcuBK+aE1zRs76yvRb0Ws+oifumThDwB/Tec6FQuAfRKfy6piChZqsu5KvL98I+2t5yyi1td+kMvdTnVL2lW44etDKseOcozmknCOmh4Dqvhl/2MwrDAhlPaN08EEq9h3w3mXtNLWH64QAAAIBAzDOKr17llngaKIdDXh+LtXKh87+zfjlTA36/9r2uF2kYE5uApDtu9sPCkt7+YBQt7R8prADPckwAiXwVdk0xijIOpLDBmoydQJJRQ+zTMxvpQmUr/1kUOv0zb+lB657CgvN0vVTmP2swPeMvgntt3C4vw7Ab+O+MS9peOAJbbQ==" - if keys[0] != expected { - t.Fatalf("expected %s, got %s", expected, keys[0]) - } - expected = "ssh-dss 
AAAAB3NzaC1kc3MAAACBAPKRWdKhzGZuLAJL6M1eM51hWViMqNBC2C6lm2OqGRYLuIf1GJ391widUuSf4wQqnkR22Q9PCmAZ19XCf11wBRMnuw9I/Z3Bt5bXfc+dzFBCmHYGJ6wNSv++H9jxyMb+usmsenWOFZGNO2jN0wrJ4ay8Yt0bwtRU+VCXpuRLszMzAAAAFQDZUIuPjcfK5HLgnwZ/J3lvtvlUjQAAAIEApIkAwLuCQV5j3U6DmI/Y6oELqSUR2purFm8jo8jePFfe1t+ghikgD254/JXlhDCVgY0NLXcak+coJfGCTT23quJ7I5xdpTn/OZO2Q6Woum/bijFC/UWwQbLz0R2nU3DoHv5v6XHQZxuIG4Fsxa91S+vWjZFtI7RuYlBCZA//ANMAAACBAJO0FojzkX6IeaWLqrgu9GTkFwGFazZ+LPH5JOWPoPn1hQKuR32Uf6qNcBZcIjY7SF0P7HF5rLQd6zKZzHqqQQ92MV555NEwjsnJglYU8CaaZsfYooaGPgA1YN7RhTSAuDmUW5Hyfj5BH4NTtrzrvJxIhDoQLf31Fasjw00r4R0O" - if keys[2] != expected { - t.Fatalf("expected %s, got %s", expected, keys[2]) - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data.go index c728d64e..42cbcd50 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data.go +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data.go @@ -36,7 +36,16 @@ func ParseUserData(contents string) (interface{}, error) { return config.NewScript(contents) case config.IsCloudConfig(contents): log.Printf("Parsing user-data as cloud-config") - return config.NewCloudConfig(contents) + cc, err := config.NewCloudConfig(contents) + if err != nil { + return nil, err + } + + if err := cc.Decode(); err != nil { + return nil, err + } + + return cc, nil case config.IsIgnitionConfig(contents): return nil, ErrIgnitionConfig default: diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data_test.go deleted file mode 100644 index 7a5acc98..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
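The user_data.go hunk above is the only live change in this stretch of the diff: after unmarshalling a cloud-config, ParseUserData now also runs cc.Decode() before returning, so callers receive a fully decoded *config.CloudConfig instead of a merely parsed one. A usage sketch, assuming the vendored packages resolve at these import paths:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/coreos-cloudinit/config"
	"github.com/coreos/coreos-cloudinit/initialize"
)

func main() {
	// CRLF line endings are tolerated, as the deleted tests below show.
	userdata := "#cloud-config\r\nhostname: foo\r\n"

	ud, err := initialize.ParseUserData(userdata)
	if err != nil {
		log.Fatalf("parsing user-data: %v", err)
	}

	// Cloud-configs come back as *config.CloudConfig, already passed
	// through Decode(); scripts and Ignition configs take other branches.
	if cc, ok := ud.(*config.CloudConfig); ok {
		fmt.Println("hostname:", cc.Hostname)
	}
}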
- -package initialize - -import ( - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestParseHeaderCRLF(t *testing.T) { - configs := []string{ - "#cloud-config\nfoo: bar", - "#cloud-config\r\nfoo: bar", - } - - for i, config := range configs { - _, err := ParseUserData(config) - if err != nil { - t.Errorf("Failed parsing config %d: %v", i, err) - } - } - - scripts := []string{ - "#!bin/bash\necho foo", - "#!bin/bash\r\necho foo", - } - - for i, script := range scripts { - _, err := ParseUserData(script) - if err != nil { - t.Errorf("Failed parsing script %d: %v", i, err) - } - } -} - -func TestParseConfigCRLF(t *testing.T) { - contents := "#cloud-config \r\nhostname: foo\r\nssh_authorized_keys:\r\n - foobar\r\n" - ud, err := ParseUserData(contents) - if err != nil { - t.Fatalf("Failed parsing config: %v", err) - } - - cfg := ud.(*config.CloudConfig) - - if cfg.Hostname != "foo" { - t.Error("Failed parsing hostname from config") - } - - if len(cfg.SSHAuthorizedKeys) != 1 { - t.Error("Parsed incorrect number of SSH keys") - } -} - -func TestParseConfigEmpty(t *testing.T) { - i, e := ParseUserData(``) - if i != nil { - t.Error("ParseUserData of empty string returned non-nil unexpectedly") - } else if e != nil { - t.Error("ParseUserData of empty string returned error unexpectedly") - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/debian.go b/vendor/github.com/coreos/coreos-cloudinit/network/debian.go deleted file mode 100644 index 91646cbd..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/debian.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
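The deleted CRLF tests above depend on header detection in the config package (which this diff does not touch) tolerating both \n and \r\n line endings, plus trailing whitespace after "#cloud-config". A rough sketch of such a check; hasHeader is a hypothetical helper, not the vendored IsCloudConfig/IsScript code:

package main

import (
	"fmt"
	"strings"
)

// hasHeader reports whether the first line of contents equals header
// once the CR and surrounding whitespace are trimmed, the tolerance
// the deleted TestParseHeaderCRLF and TestParseConfigCRLF rely on.
func hasHeader(contents, header string) bool {
	firstLine := strings.SplitN(contents, "\n", 2)[0]
	return strings.TrimSpace(firstLine) == header
}

func main() {
	fmt.Println(hasHeader("#cloud-config \r\nhostname: foo", "#cloud-config")) // true
	fmt.Println(hasHeader("#cloud-config\nfoo: bar", "#cloud-config"))         // true
}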
- -package network - -import ( - "log" - "strings" -) - -func ProcessDebianNetconf(config []byte) ([]InterfaceGenerator, error) { - log.Println("Processing Debian network config") - lines := formatConfig(string(config)) - stanzas, err := parseStanzas(lines) - if err != nil { - return nil, err - } - - interfaces := make([]*stanzaInterface, 0, len(stanzas)) - for _, stanza := range stanzas { - switch s := stanza.(type) { - case *stanzaInterface: - interfaces = append(interfaces, s) - } - } - log.Printf("Parsed %d network interfaces\n", len(interfaces)) - - log.Println("Processed Debian network config") - return buildInterfaces(interfaces), nil -} - -func formatConfig(config string) []string { - lines := []string{} - config = strings.Replace(config, "\\\n", "", -1) - for config != "" { - split := strings.SplitN(config, "\n", 2) - line := strings.TrimSpace(split[0]) - - if len(split) == 2 { - config = split[1] - } else { - config = "" - } - - if strings.HasPrefix(line, "#") || line == "" { - continue - } - - lines = append(lines, line) - } - return lines -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/debian_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/debian_test.go deleted file mode 100644 index da9e281d..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/debian_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package network - -import ( - "testing" -) - -func TestFormatConfigs(t *testing.T) { - for in, n := range map[string]int{ - "": 0, - "line1\\\nis long": 1, - "#comment": 0, - "#comment\\\ncomment": 0, - " #comment \\\n comment\nline 1\nline 2\\\n is long": 2, - } { - lines := formatConfig(in) - if len(lines) != n { - t.Fatalf("bad number of lines for config %q: got %d, want %d", in, len(lines), n) - } - } -} - -func TestProcessDebianNetconf(t *testing.T) { - for _, tt := range []struct { - in string - fail bool - n int - }{ - {"", false, 0}, - {"iface", true, -1}, - {"auto eth1\nauto eth2", false, 0}, - {"iface eth1 inet manual", false, 1}, - } { - interfaces, err := ProcessDebianNetconf([]byte(tt.in)) - failed := err != nil - if tt.fail != failed { - t.Fatalf("bad failure state for %q: got %t, want %t", tt.in, failed, tt.fail) - } - if tt.n != -1 && tt.n != len(interfaces) { - t.Fatalf("bad number of interfaces for %q: got %d, want %q", tt.in, len(interfaces), tt.n) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go b/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go deleted file mode 100644 index 78759a10..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package network - -import ( - "fmt" - "log" - "net" - - "github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean" -) - -func ProcessDigitalOceanNetconf(config digitalocean.Metadata) ([]InterfaceGenerator, error) { - log.Println("Processing DigitalOcean network config") - - log.Println("Parsing nameservers") - nameservers, err := parseNameservers(config.DNS) - if err != nil { - return nil, err - } - log.Printf("Parsed %d nameservers\n", len(nameservers)) - - log.Println("Parsing interfaces") - generators, err := parseInterfaces(config.Interfaces, nameservers) - if err != nil { - return nil, err - } - log.Printf("Parsed %d network interfaces\n", len(generators)) - - log.Println("Processed DigitalOcean network config") - return generators, nil -} - -func parseNameservers(config digitalocean.DNS) ([]net.IP, error) { - nameservers := make([]net.IP, 0, len(config.Nameservers)) - for _, ns := range config.Nameservers { - if ip := net.ParseIP(ns); ip == nil { - return nil, fmt.Errorf("could not parse %q as nameserver IP address", ns) - } else { - nameservers = append(nameservers, ip) - } - } - return nameservers, nil -} - -func parseInterfaces(config digitalocean.Interfaces, nameservers []net.IP) ([]InterfaceGenerator, error) { - generators := make([]InterfaceGenerator, 0, len(config.Public)+len(config.Private)) - for _, iface := range config.Public { - if generator, err := parseInterface(iface, nameservers, true); err == nil { - generators = append(generators, &physicalInterface{*generator}) - } else { - return nil, err - } - } - for _, iface := range config.Private { - if generator, err := parseInterface(iface, []net.IP{}, false); err == nil { - generators = append(generators, &physicalInterface{*generator}) - } else { - return nil, err - } - } - return generators, nil -} - -func parseInterface(iface digitalocean.Interface, nameservers []net.IP, useRoute bool) (*logicalInterface, error) { - routes := make([]route, 0) - addresses := make([]net.IPNet, 0) - if iface.IPv4 != nil { - var ip, mask, gateway net.IP - if ip = net.ParseIP(iface.IPv4.IPAddress); ip == nil { - return nil, fmt.Errorf("could not parse %q as IPv4 address", iface.IPv4.IPAddress) - } - if mask = net.ParseIP(iface.IPv4.Netmask); mask == nil { - return nil, fmt.Errorf("could not parse %q as IPv4 mask", iface.IPv4.Netmask) - } - addresses = append(addresses, net.IPNet{ - IP: ip, - Mask: net.IPMask(mask), - }) - - if useRoute { - if gateway = net.ParseIP(iface.IPv4.Gateway); gateway == nil { - return nil, fmt.Errorf("could not parse %q as IPv4 gateway", iface.IPv4.Gateway) - } - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv4zero, - Mask: net.IPMask(net.IPv4zero), - }, - gateway: gateway, - }) - } - } - if iface.IPv6 != nil { - var ip, gateway net.IP - if ip = net.ParseIP(iface.IPv6.IPAddress); ip == nil { - return nil, fmt.Errorf("could not parse %q as IPv6 address", iface.IPv6.IPAddress) - } - addresses = append(addresses, net.IPNet{ - IP: ip, - Mask: net.CIDRMask(iface.IPv6.Cidr, net.IPv6len*8), - }) - - if useRoute { - if gateway = net.ParseIP(iface.IPv6.Gateway); gateway 
== nil { - return nil, fmt.Errorf("could not parse %q as IPv6 gateway", iface.IPv6.Gateway) - } - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv6zero, - Mask: net.IPMask(net.IPv6zero), - }, - gateway: gateway, - }) - } - } - if iface.AnchorIPv4 != nil { - var ip, mask net.IP - if ip = net.ParseIP(iface.AnchorIPv4.IPAddress); ip == nil { - return nil, fmt.Errorf("could not parse %q as anchor IPv4 address", iface.AnchorIPv4.IPAddress) - } - if mask = net.ParseIP(iface.AnchorIPv4.Netmask); mask == nil { - return nil, fmt.Errorf("could not parse %q as anchor IPv4 mask", iface.AnchorIPv4.Netmask) - } - addresses = append(addresses, net.IPNet{ - IP: ip, - Mask: net.IPMask(mask), - }) - - if useRoute { - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv4zero, - Mask: net.IPMask(net.IPv4zero), - }, - }) - } - } - - hwaddr, err := net.ParseMAC(iface.MAC) - if err != nil { - return nil, err - } - - if nameservers == nil { - nameservers = []net.IP{} - } - - return &logicalInterface{ - hwaddr: hwaddr, - config: configMethodStatic{ - addresses: addresses, - nameservers: nameservers, - routes: routes, - }, - }, nil -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean_test.go deleted file mode 100644 index fc4fc6d5..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean_test.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
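parseInterface above converts the dotted-quad address and netmask strings returned by the DigitalOcean metadata service into net.IPNet values, adding a default route when the interface is public. A condensed sketch of the IPv4 half; unlike the vendored code it normalizes both sides with To4() so that IPNet.String() prints clean CIDR notation:

package main

import (
	"fmt"
	"net"
)

// ipv4Net parses an address/netmask string pair the way the deleted
// parseInterface above does, returning an error for unparseable input.
func ipv4Net(address, netmask string) (*net.IPNet, error) {
	ip := net.ParseIP(address)
	if ip == nil {
		return nil, fmt.Errorf("could not parse %q as IPv4 address", address)
	}
	mask := net.ParseIP(netmask)
	if mask == nil {
		return nil, fmt.Errorf("could not parse %q as IPv4 mask", netmask)
	}
	// Normalize to 4-byte form so the mask renders as a prefix length.
	return &net.IPNet{IP: ip.To4(), Mask: net.IPMask(mask.To4())}, nil
}

func main() {
	n, err := ipv4Net("203.0.113.5", "255.255.255.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 203.0.113.5/24
}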
- -package network - -import ( - "errors" - "net" - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean" -) - -func TestParseNameservers(t *testing.T) { - for _, tt := range []struct { - dns digitalocean.DNS - nss []net.IP - err error - }{ - { - dns: digitalocean.DNS{}, - nss: []net.IP{}, - }, - { - dns: digitalocean.DNS{Nameservers: []string{"1.2.3.4"}}, - nss: []net.IP{net.ParseIP("1.2.3.4")}, - }, - { - dns: digitalocean.DNS{Nameservers: []string{"bad"}}, - err: errors.New("could not parse \"bad\" as nameserver IP address"), - }, - } { - nss, err := parseNameservers(tt.dns) - if !errorsEqual(tt.err, err) { - t.Fatalf("bad error (%+v): want %q, got %q", tt.dns, tt.err, err) - } - if !reflect.DeepEqual(tt.nss, nss) { - t.Fatalf("bad nameservers (%+v): want %#v, got %#v", tt.dns, tt.nss, nss) - } - } -} - -func mkInvalidMAC() error { - if isGo15 { - return &net.AddrError{Err: "invalid MAC address", Addr: "bad"} - } else { - return errors.New("invalid MAC address: bad") - } -} - -func TestParseInterface(t *testing.T) { - for _, tt := range []struct { - cfg digitalocean.Interface - nss []net.IP - useRoute bool - iface *logicalInterface - err error - }{ - { - cfg: digitalocean.Interface{ - MAC: "bad", - }, - err: mkInvalidMAC(), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - }, - nss: []net.IP{}, - iface: &logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{}, - nameservers: []net.IP{}, - routes: []route{}, - }, - }, - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - }, - useRoute: true, - nss: []net.IP{net.ParseIP("1.2.3.4")}, - iface: &logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{}, - nameservers: []net.IP{net.ParseIP("1.2.3.4")}, - routes: []route{}, - }, - }, - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv4: &digitalocean.Address{ - IPAddress: "bad", - Netmask: "255.255.0.0", - }, - }, - nss: []net.IP{}, - err: errors.New("could not parse \"bad\" as IPv4 address"), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv4: &digitalocean.Address{ - IPAddress: "1.2.3.4", - Netmask: "bad", - }, - }, - nss: []net.IP{}, - err: errors.New("could not parse \"bad\" as IPv4 mask"), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv4: &digitalocean.Address{ - IPAddress: "1.2.3.4", - Netmask: "255.255.0.0", - Gateway: "ignoreme", - }, - }, - nss: []net.IP{}, - iface: &logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{net.IPNet{ - IP: net.ParseIP("1.2.3.4"), - Mask: net.IPMask(net.ParseIP("255.255.0.0")), - }}, - nameservers: []net.IP{}, - routes: []route{}, - }, - }, - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv4: &digitalocean.Address{ - IPAddress: "1.2.3.4", - Netmask: "255.255.0.0", - Gateway: "bad", - }, - }, - useRoute: true, - nss: []net.IP{}, - err: errors.New("could not parse \"bad\" as IPv4 gateway"), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv4: &digitalocean.Address{ - IPAddress: "1.2.3.4", - Netmask: "255.255.0.0", - Gateway: "5.6.7.8", - }, - }, - useRoute: true, - nss: []net.IP{}, - iface: &logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: 
configMethodStatic{ - addresses: []net.IPNet{net.IPNet{ - IP: net.ParseIP("1.2.3.4"), - Mask: net.IPMask(net.ParseIP("255.255.0.0")), - }}, - nameservers: []net.IP{}, - routes: []route{route{ - net.IPNet{IP: net.IPv4zero, Mask: net.IPMask(net.IPv4zero)}, - net.ParseIP("5.6.7.8"), - }}, - }, - }, - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv6: &digitalocean.Address{ - IPAddress: "bad", - Cidr: 16, - }, - }, - nss: []net.IP{}, - err: errors.New("could not parse \"bad\" as IPv6 address"), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv6: &digitalocean.Address{ - IPAddress: "fe00::", - Cidr: 16, - Gateway: "ignoreme", - }, - }, - nss: []net.IP{}, - iface: &logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{net.IPNet{ - IP: net.ParseIP("fe00::"), - Mask: net.IPMask(net.ParseIP("ffff::")), - }}, - nameservers: []net.IP{}, - routes: []route{}, - }, - }, - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv6: &digitalocean.Address{ - IPAddress: "fe00::", - Cidr: 16, - Gateway: "bad", - }, - }, - useRoute: true, - nss: []net.IP{}, - err: errors.New("could not parse \"bad\" as IPv6 gateway"), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv6: &digitalocean.Address{ - IPAddress: "fe00::", - Cidr: 16, - Gateway: "fe00:1234::", - }, - }, - useRoute: true, - nss: []net.IP{}, - iface: &logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{net.IPNet{ - IP: net.ParseIP("fe00::"), - Mask: net.IPMask(net.ParseIP("ffff::")), - }}, - nameservers: []net.IP{}, - routes: []route{route{ - net.IPNet{IP: net.IPv6zero, Mask: net.IPMask(net.IPv6zero)}, - net.ParseIP("fe00:1234::"), - }}, - }, - }, - }, - - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - AnchorIPv4: &digitalocean.Address{ - IPAddress: "bad", - Netmask: "255.255.0.0", - }, - }, - nss: []net.IP{}, - err: errors.New("could not parse \"bad\" as anchor IPv4 address"), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - AnchorIPv4: &digitalocean.Address{ - IPAddress: "1.2.3.4", - Netmask: "bad", - }, - }, - nss: []net.IP{}, - err: errors.New("could not parse \"bad\" as anchor IPv4 mask"), - }, - { - cfg: digitalocean.Interface{ - MAC: "01:23:45:67:89:AB", - IPv4: &digitalocean.Address{ - IPAddress: "1.2.3.4", - Netmask: "255.255.0.0", - Gateway: "5.6.7.8", - }, - AnchorIPv4: &digitalocean.Address{ - IPAddress: "7.8.9.10", - Netmask: "255.255.0.0", - }, - }, - useRoute: true, - nss: []net.IP{}, - iface: &logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{ - { - IP: net.ParseIP("1.2.3.4"), - Mask: net.IPMask(net.ParseIP("255.255.0.0")), - }, - { - IP: net.ParseIP("7.8.9.10"), - Mask: net.IPMask(net.ParseIP("255.255.0.0")), - }, - }, - nameservers: []net.IP{}, - routes: []route{ - { - destination: net.IPNet{IP: net.IPv4zero, Mask: net.IPMask(net.IPv4zero)}, - gateway: net.ParseIP("5.6.7.8"), - }, - { - destination: net.IPNet{IP: net.IPv4zero, Mask: net.IPMask(net.IPv4zero)}, - }, - }, - }, - }, - }, - } { - iface, err := parseInterface(tt.cfg, tt.nss, tt.useRoute) - if !errorsEqual(tt.err, err) { - t.Fatalf("bad error (%+v): want %q, got %q", tt.cfg, tt.err, err) - } - if !reflect.DeepEqual(tt.iface, iface) { - t.Fatalf("bad interface (%+v): want %#v, got 
%#v", tt.cfg, tt.iface, iface) - } - } -} - -func TestParseInterfaces(t *testing.T) { - for _, tt := range []struct { - cfg digitalocean.Interfaces - nss []net.IP - ifaces []InterfaceGenerator - err error - }{ - { - ifaces: []InterfaceGenerator{}, - }, - { - cfg: digitalocean.Interfaces{ - Public: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}}, - }, - ifaces: []InterfaceGenerator{ - &physicalInterface{logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{}, - nameservers: []net.IP{}, - routes: []route{}, - }, - }}, - }, - }, - { - cfg: digitalocean.Interfaces{ - Private: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}}, - }, - ifaces: []InterfaceGenerator{ - &physicalInterface{logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{}, - nameservers: []net.IP{}, - routes: []route{}, - }, - }}, - }, - }, - { - cfg: digitalocean.Interfaces{ - Public: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}}, - }, - nss: []net.IP{net.ParseIP("1.2.3.4")}, - ifaces: []InterfaceGenerator{ - &physicalInterface{logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{}, - nameservers: []net.IP{net.ParseIP("1.2.3.4")}, - routes: []route{}, - }, - }}, - }, - }, - { - cfg: digitalocean.Interfaces{ - Private: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}}, - }, - nss: []net.IP{net.ParseIP("1.2.3.4")}, - ifaces: []InterfaceGenerator{ - &physicalInterface{logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), - config: configMethodStatic{ - addresses: []net.IPNet{}, - nameservers: []net.IP{}, - routes: []route{}, - }, - }}, - }, - }, - { - cfg: digitalocean.Interfaces{ - Public: []digitalocean.Interface{{MAC: "bad"}}, - }, - err: mkInvalidMAC(), - }, - { - cfg: digitalocean.Interfaces{ - Private: []digitalocean.Interface{{MAC: "bad"}}, - }, - err: mkInvalidMAC(), - }, - } { - ifaces, err := parseInterfaces(tt.cfg, tt.nss) - if !errorsEqual(tt.err, err) { - t.Fatalf("bad error (%+v): want %q, got %q", tt.cfg, tt.err, err) - } - if !reflect.DeepEqual(tt.ifaces, ifaces) { - t.Fatalf("bad interfaces (%+v): want %#v, got %#v", tt.cfg, tt.ifaces, ifaces) - } - } -} - -func TestProcessDigitalOceanNetconf(t *testing.T) { - for _, tt := range []struct { - cfg digitalocean.Metadata - ifaces []InterfaceGenerator - err error - }{ - { - cfg: digitalocean.Metadata{ - DNS: digitalocean.DNS{ - Nameservers: []string{"bad"}, - }, - }, - err: errors.New("could not parse \"bad\" as nameserver IP address"), - }, - { - cfg: digitalocean.Metadata{ - Interfaces: digitalocean.Interfaces{ - Public: []digitalocean.Interface{ - digitalocean.Interface{ - IPv4: &digitalocean.Address{ - IPAddress: "bad", - }, - }, - }, - }, - }, - err: errors.New("could not parse \"bad\" as IPv4 address"), - }, - { - ifaces: []InterfaceGenerator{}, - }, - } { - ifaces, err := ProcessDigitalOceanNetconf(tt.cfg) - if !errorsEqual(tt.err, err) { - t.Fatalf("bad error (%q): want %q, got %q", tt.cfg, tt.err, err) - } - if !reflect.DeepEqual(tt.ifaces, ifaces) { - t.Fatalf("bad interfaces (%q): want %#v, got %#v", tt.cfg, tt.ifaces, ifaces) - } - } -} - -func errorsEqual(a, b error) bool { - if a == nil && b == nil { - return true - } - if (a != nil && b == nil) || (a == nil && b != nil) { - return false - } - return (a.Error() == b.Error()) -} diff 
--git a/vendor/github.com/coreos/coreos-cloudinit/network/interface.go b/vendor/github.com/coreos/coreos-cloudinit/network/interface.go deleted file mode 100644 index 73a83cbc..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/interface.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package network - -import ( - "fmt" - "net" - "sort" - "strconv" - "strings" -) - -type InterfaceGenerator interface { - Name() string - Filename() string - Netdev() string - Link() string - Network() string - Type() string - ModprobeParams() string -} - -type networkInterface interface { - InterfaceGenerator - Children() []networkInterface - setConfigDepth(int) -} - -type logicalInterface struct { - name string - hwaddr net.HardwareAddr - config configMethod - children []networkInterface - configDepth int -} - -func (i *logicalInterface) Name() string { - return i.name -} - -func (i *logicalInterface) Network() string { - config := fmt.Sprintln("[Match]") - if i.name != "" { - config += fmt.Sprintf("Name=%s\n", i.name) - } - if i.hwaddr != nil { - config += fmt.Sprintf("MACAddress=%s\n", i.hwaddr) - } - config += "\n[Network]\n" - - for _, child := range i.children { - switch iface := child.(type) { - case *vlanInterface: - config += fmt.Sprintf("VLAN=%s\n", iface.name) - case *bondInterface: - config += fmt.Sprintf("Bond=%s\n", iface.name) - } - } - - switch conf := i.config.(type) { - case configMethodStatic: - for _, nameserver := range conf.nameservers { - config += fmt.Sprintf("DNS=%s\n", nameserver) - } - for _, addr := range conf.addresses { - config += fmt.Sprintf("\n[Address]\nAddress=%s\n", addr.String()) - } - for _, route := range conf.routes { - config += fmt.Sprintf("\n[Route]\nDestination=%s\nGateway=%s\n", route.destination.String(), route.gateway) - } - case configMethodDHCP: - config += "DHCP=true\n" - } - - return config -} - -func (i *logicalInterface) Link() string { - return "" -} - -func (i *logicalInterface) Netdev() string { - return "" -} - -func (i *logicalInterface) Filename() string { - name := i.name - if name == "" { - name = i.hwaddr.String() - } - return fmt.Sprintf("%02x-%s", i.configDepth, name) -} - -func (i *logicalInterface) Children() []networkInterface { - return i.children -} - -func (i *logicalInterface) ModprobeParams() string { - return "" -} - -func (i *logicalInterface) setConfigDepth(depth int) { - i.configDepth = depth -} - -type physicalInterface struct { - logicalInterface -} - -func (p *physicalInterface) Type() string { - return "physical" -} - -type bondInterface struct { - logicalInterface - slaves []string - options map[string]string -} - -func (b *bondInterface) Netdev() string { - config := fmt.Sprintf("[NetDev]\nKind=bond\nName=%s\n", b.name) - if b.hwaddr != nil { - config += fmt.Sprintf("MACAddress=%s\n", b.hwaddr.String()) - } - - config += fmt.Sprintf("\n[Bond]\n") - for _, name := range sortedKeys(b.options) { - config += fmt.Sprintf("%s=%s\n", name, 
b.options[name]) - } - - return config -} - -func (b *bondInterface) Type() string { - return "bond" -} - -func (b *bondInterface) ModprobeParams() string { - params := "" - for _, name := range sortedKeys(b.options) { - params += fmt.Sprintf("%s=%s ", name, b.options[name]) - } - params = strings.TrimSuffix(params, " ") - return params -} - -type vlanInterface struct { - logicalInterface - id int - rawDevice string -} - -func (v *vlanInterface) Netdev() string { - config := fmt.Sprintf("[NetDev]\nKind=vlan\nName=%s\n", v.name) - switch c := v.config.(type) { - case configMethodStatic: - if c.hwaddress != nil { - config += fmt.Sprintf("MACAddress=%s\n", c.hwaddress) - } - case configMethodDHCP: - if c.hwaddress != nil { - config += fmt.Sprintf("MACAddress=%s\n", c.hwaddress) - } - } - config += fmt.Sprintf("\n[VLAN]\nId=%d\n", v.id) - return config -} - -func (v *vlanInterface) Type() string { - return "vlan" -} - -func buildInterfaces(stanzas []*stanzaInterface) []InterfaceGenerator { - interfaceMap := createInterfaces(stanzas) - linkAncestors(interfaceMap) - markConfigDepths(interfaceMap) - - interfaces := make([]InterfaceGenerator, 0, len(interfaceMap)) - for _, name := range sortedInterfaces(interfaceMap) { - interfaces = append(interfaces, interfaceMap[name]) - } - - return interfaces -} - -func createInterfaces(stanzas []*stanzaInterface) map[string]networkInterface { - interfaceMap := make(map[string]networkInterface) - for _, iface := range stanzas { - switch iface.kind { - case interfaceBond: - bondOptions := make(map[string]string) - for _, k := range []string{"mode", "miimon", "lacp-rate"} { - if v, ok := iface.options["bond-"+k]; ok && len(v) > 0 { - bondOptions[k] = v[0] - } - } - interfaceMap[iface.name] = &bondInterface{ - logicalInterface{ - name: iface.name, - config: iface.configMethod, - children: []networkInterface{}, - }, - iface.options["bond-slaves"], - bondOptions, - } - for _, slave := range iface.options["bond-slaves"] { - if _, ok := interfaceMap[slave]; !ok { - interfaceMap[slave] = &physicalInterface{ - logicalInterface{ - name: slave, - config: configMethodManual{}, - children: []networkInterface{}, - }, - } - } - } - - case interfacePhysical: - if _, ok := iface.configMethod.(configMethodLoopback); ok { - continue - } - interfaceMap[iface.name] = &physicalInterface{ - logicalInterface{ - name: iface.name, - config: iface.configMethod, - children: []networkInterface{}, - }, - } - - case interfaceVLAN: - var rawDevice string - id, _ := strconv.Atoi(iface.options["id"][0]) - if device := iface.options["raw_device"]; len(device) == 1 { - rawDevice = device[0] - if _, ok := interfaceMap[rawDevice]; !ok { - interfaceMap[rawDevice] = &physicalInterface{ - logicalInterface{ - name: rawDevice, - config: configMethodManual{}, - children: []networkInterface{}, - }, - } - } - } - interfaceMap[iface.name] = &vlanInterface{ - logicalInterface{ - name: iface.name, - config: iface.configMethod, - children: []networkInterface{}, - }, - id, - rawDevice, - } - } - } - return interfaceMap -} - -func linkAncestors(interfaceMap map[string]networkInterface) { - for _, name := range sortedInterfaces(interfaceMap) { - iface := interfaceMap[name] - switch i := iface.(type) { - case *vlanInterface: - if parent, ok := interfaceMap[i.rawDevice]; ok { - switch p := parent.(type) { - case *physicalInterface: - p.children = append(p.children, iface) - case *bondInterface: - p.children = append(p.children, iface) - } - } - case *bondInterface: - for _, slave := range i.slaves { - if parent, 
ok := interfaceMap[slave]; ok { - switch p := parent.(type) { - case *physicalInterface: - p.children = append(p.children, iface) - case *bondInterface: - p.children = append(p.children, iface) - } - } - } - } - } -} - -func markConfigDepths(interfaceMap map[string]networkInterface) { - rootInterfaceMap := make(map[string]networkInterface) - for k, v := range interfaceMap { - rootInterfaceMap[k] = v - } - - for _, iface := range interfaceMap { - for _, child := range iface.Children() { - delete(rootInterfaceMap, child.Name()) - } - } - for _, iface := range rootInterfaceMap { - setDepth(iface) - } -} - -func setDepth(iface networkInterface) int { - maxDepth := 0 - for _, child := range iface.Children() { - if depth := setDepth(child); depth > maxDepth { - maxDepth = depth - } - } - iface.setConfigDepth(maxDepth) - return (maxDepth + 1) -} - -func sortedKeys(m map[string]string) (keys []string) { - for key := range m { - keys = append(keys, key) - } - sort.Strings(keys) - return -} - -func sortedInterfaces(m map[string]networkInterface) (keys []string) { - for key := range m { - keys = append(keys, key) - } - sort.Strings(keys) - return -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/interface_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/interface_test.go deleted file mode 100644 index 5cafc939..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/interface_test.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
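The deleted interface.go above writes the generated systemd-networkd files in an order derived from each interface's position in the device stack: Filename prefixes the name with the config depth as two lower-case hex digits, so an underlying physical device sorts after the bonds and VLANs built on top of it (eth0 at depth 2 versus its VLANs at depth 0 in the deleted TestBuildInterfaces below). A sketch of that naming rule, matching the cases in TestFilename below:

package main

import (
	"fmt"
	"net"
)

// filename mirrors logicalInterface.Filename above: a two-digit
// lower-case hex config depth, then the interface name, falling
// back to the MAC address when the name is empty.
func filename(name string, hwaddr net.HardwareAddr, depth int) string {
	if name == "" {
		name = hwaddr.String()
	}
	return fmt.Sprintf("%02x-%s", depth, name)
}

func main() {
	mac, _ := net.ParseMAC("01:23:45:67:89:ab")
	fmt.Println(filename("iface", nil, 53)) // 35-iface
	fmt.Println(filename("", mac, 1))       // 01-01:23:45:67:89:ab
}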
- -package network - -import ( - "net" - "reflect" - "testing" -) - -func TestInterfaceGenerators(t *testing.T) { - for _, tt := range []struct { - name string - netdev string - link string - network string - kind string - iface InterfaceGenerator - }{ - { - name: "", - network: "[Match]\nMACAddress=00:01:02:03:04:05\n\n[Network]\n", - kind: "physical", - iface: &physicalInterface{logicalInterface{ - hwaddr: net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5}), - }}, - }, - { - name: "testname", - network: "[Match]\nName=testname\n\n[Network]\nBond=testbond1\nVLAN=testvlan1\nVLAN=testvlan2\n", - kind: "physical", - iface: &physicalInterface{logicalInterface{ - name: "testname", - children: []networkInterface{ - &bondInterface{logicalInterface: logicalInterface{name: "testbond1"}}, - &vlanInterface{logicalInterface: logicalInterface{name: "testvlan1"}, id: 1}, - &vlanInterface{logicalInterface: logicalInterface{name: "testvlan2"}, id: 1}, - }, - }}, - }, - { - name: "testname", - netdev: "[NetDev]\nKind=bond\nName=testname\n\n[Bond]\n", - network: "[Match]\nName=testname\n\n[Network]\nBond=testbond1\nVLAN=testvlan1\nVLAN=testvlan2\nDHCP=true\n", - kind: "bond", - iface: &bondInterface{logicalInterface: logicalInterface{ - name: "testname", - config: configMethodDHCP{}, - children: []networkInterface{ - &bondInterface{logicalInterface: logicalInterface{name: "testbond1"}}, - &vlanInterface{logicalInterface: logicalInterface{name: "testvlan1"}, id: 1}, - &vlanInterface{logicalInterface: logicalInterface{name: "testvlan2"}, id: 1}, - }, - }}, - }, - { - name: "testname", - netdev: "[NetDev]\nKind=vlan\nName=testname\n\n[VLAN]\nId=1\n", - network: "[Match]\nName=testname\n\n[Network]\n", - kind: "vlan", - iface: &vlanInterface{logicalInterface{name: "testname"}, 1, ""}, - }, - { - name: "testname", - netdev: "[NetDev]\nKind=vlan\nName=testname\nMACAddress=00:01:02:03:04:05\n\n[VLAN]\nId=1\n", - network: "[Match]\nName=testname\n\n[Network]\n", - kind: "vlan", - iface: &vlanInterface{logicalInterface{name: "testname", config: configMethodStatic{hwaddress: net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5})}}, 1, ""}, - }, - { - name: "testname", - netdev: "[NetDev]\nKind=vlan\nName=testname\nMACAddress=00:01:02:03:04:05\n\n[VLAN]\nId=1\n", - network: "[Match]\nName=testname\n\n[Network]\nDHCP=true\n", - kind: "vlan", - iface: &vlanInterface{logicalInterface{name: "testname", config: configMethodDHCP{hwaddress: net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5})}}, 1, ""}, - }, - { - name: "testname", - netdev: "[NetDev]\nKind=vlan\nName=testname\n\n[VLAN]\nId=0\n", - network: "[Match]\nName=testname\n\n[Network]\nDNS=8.8.8.8\n\n[Address]\nAddress=192.168.1.100/24\n\n[Route]\nDestination=0.0.0.0/0\nGateway=1.2.3.4\n", - kind: "vlan", - iface: &vlanInterface{logicalInterface: logicalInterface{ - name: "testname", - config: configMethodStatic{ - addresses: []net.IPNet{{IP: []byte{192, 168, 1, 100}, Mask: []byte{255, 255, 255, 0}}}, - nameservers: []net.IP{[]byte{8, 8, 8, 8}}, - routes: []route{route{destination: net.IPNet{IP: []byte{0, 0, 0, 0}, Mask: []byte{0, 0, 0, 0}}, gateway: []byte{1, 2, 3, 4}}}, - }, - }}, - }, - } { - if name := tt.iface.Name(); name != tt.name { - t.Fatalf("bad name (%q): want %q, got %q", tt.iface, tt.name, name) - } - if netdev := tt.iface.Netdev(); netdev != tt.netdev { - t.Fatalf("bad netdev (%q): want %q, got %q", tt.iface, tt.netdev, netdev) - } - if link := tt.iface.Link(); link != tt.link { - t.Fatalf("bad link (%q): want %q, got %q", tt.iface, tt.link, link) - } - if network := 
tt.iface.Network(); network != tt.network { - t.Fatalf("bad network (%q): want %q, got %q", tt.iface, tt.network, network) - } - if kind := tt.iface.Type(); kind != tt.kind { - t.Fatalf("bad type (%q): want %q, got %q", tt.iface, tt.kind, kind) - } - } -} - -func TestModprobeParams(t *testing.T) { - for _, tt := range []struct { - i InterfaceGenerator - p string - }{ - { - i: &physicalInterface{}, - p: "", - }, - { - i: &vlanInterface{}, - p: "", - }, - { - i: &bondInterface{ - logicalInterface{}, - nil, - map[string]string{ - "a": "1", - "b": "2", - }, - }, - p: "a=1 b=2", - }, - } { - if p := tt.i.ModprobeParams(); p != tt.p { - t.Fatalf("bad params (%q): got %s, want %s", tt.i, p, tt.p) - } - } -} - -func TestBuildInterfacesLo(t *testing.T) { - stanzas := []*stanzaInterface{ - &stanzaInterface{ - name: "lo", - kind: interfacePhysical, - auto: false, - configMethod: configMethodLoopback{}, - options: map[string][]string{}, - }, - } - interfaces := buildInterfaces(stanzas) - if len(interfaces) != 0 { - t.FailNow() - } -} - -func TestBuildInterfacesBlindBond(t *testing.T) { - stanzas := []*stanzaInterface{ - { - name: "bond0", - kind: interfaceBond, - auto: false, - configMethod: configMethodManual{}, - options: map[string][]string{ - "bond-slaves": []string{"eth0"}, - }, - }, - } - interfaces := buildInterfaces(stanzas) - bond0 := &bondInterface{ - logicalInterface{ - name: "bond0", - config: configMethodManual{}, - children: []networkInterface{}, - configDepth: 0, - }, - []string{"eth0"}, - map[string]string{}, - } - eth0 := &physicalInterface{ - logicalInterface{ - name: "eth0", - config: configMethodManual{}, - children: []networkInterface{bond0}, - configDepth: 1, - }, - } - expect := []InterfaceGenerator{bond0, eth0} - if !reflect.DeepEqual(interfaces, expect) { - t.FailNow() - } -} - -func TestBuildInterfacesBlindVLAN(t *testing.T) { - stanzas := []*stanzaInterface{ - { - name: "vlan0", - kind: interfaceVLAN, - auto: false, - configMethod: configMethodManual{}, - options: map[string][]string{ - "id": []string{"0"}, - "raw_device": []string{"eth0"}, - }, - }, - } - interfaces := buildInterfaces(stanzas) - vlan0 := &vlanInterface{ - logicalInterface{ - name: "vlan0", - config: configMethodManual{}, - children: []networkInterface{}, - configDepth: 0, - }, - 0, - "eth0", - } - eth0 := &physicalInterface{ - logicalInterface{ - name: "eth0", - config: configMethodManual{}, - children: []networkInterface{vlan0}, - configDepth: 1, - }, - } - expect := []InterfaceGenerator{eth0, vlan0} - if !reflect.DeepEqual(interfaces, expect) { - t.FailNow() - } -} - -func TestBuildInterfaces(t *testing.T) { - stanzas := []*stanzaInterface{ - &stanzaInterface{ - name: "eth0", - kind: interfacePhysical, - auto: false, - configMethod: configMethodManual{}, - options: map[string][]string{}, - }, - &stanzaInterface{ - name: "bond0", - kind: interfaceBond, - auto: false, - configMethod: configMethodManual{}, - options: map[string][]string{ - "bond-slaves": []string{"eth0"}, - "bond-mode": []string{"4"}, - "bond-miimon": []string{"100"}, - }, - }, - &stanzaInterface{ - name: "bond1", - kind: interfaceBond, - auto: false, - configMethod: configMethodManual{}, - options: map[string][]string{ - "bond-slaves": []string{"bond0"}, - }, - }, - &stanzaInterface{ - name: "vlan0", - kind: interfaceVLAN, - auto: false, - configMethod: configMethodManual{}, - options: map[string][]string{ - "id": []string{"0"}, - "raw_device": []string{"eth0"}, - }, - }, - &stanzaInterface{ - name: "vlan1", - kind: interfaceVLAN, - auto: 
false, - configMethod: configMethodManual{}, - options: map[string][]string{ - "id": []string{"1"}, - "raw_device": []string{"bond0"}, - }, - }, - } - interfaces := buildInterfaces(stanzas) - vlan1 := &vlanInterface{ - logicalInterface{ - name: "vlan1", - config: configMethodManual{}, - children: []networkInterface{}, - configDepth: 0, - }, - 1, - "bond0", - } - vlan0 := &vlanInterface{ - logicalInterface{ - name: "vlan0", - config: configMethodManual{}, - children: []networkInterface{}, - configDepth: 0, - }, - 0, - "eth0", - } - bond1 := &bondInterface{ - logicalInterface{ - name: "bond1", - config: configMethodManual{}, - children: []networkInterface{}, - configDepth: 0, - }, - []string{"bond0"}, - map[string]string{}, - } - bond0 := &bondInterface{ - logicalInterface{ - name: "bond0", - config: configMethodManual{}, - children: []networkInterface{bond1, vlan1}, - configDepth: 1, - }, - []string{"eth0"}, - map[string]string{ - "mode": "4", - "miimon": "100", - }, - } - eth0 := &physicalInterface{ - logicalInterface{ - name: "eth0", - config: configMethodManual{}, - children: []networkInterface{bond0, vlan0}, - configDepth: 2, - }, - } - expect := []InterfaceGenerator{bond0, bond1, eth0, vlan0, vlan1} - if !reflect.DeepEqual(interfaces, expect) { - t.FailNow() - } -} - -func TestFilename(t *testing.T) { - for _, tt := range []struct { - i logicalInterface - f string - }{ - {logicalInterface{name: "iface", configDepth: 0}, "00-iface"}, - {logicalInterface{name: "iface", configDepth: 9}, "09-iface"}, - {logicalInterface{name: "iface", configDepth: 10}, "0a-iface"}, - {logicalInterface{name: "iface", configDepth: 53}, "35-iface"}, - {logicalInterface{hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), configDepth: 1}, "01-01:23:45:67:89:ab"}, - {logicalInterface{name: "iface", hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), configDepth: 1}, "01-iface"}, - } { - if tt.i.Filename() != tt.f { - t.Fatalf("bad filename (%q): got %q, want %q", tt.i, tt.i.Filename(), tt.f) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_false_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_false_test.go deleted file mode 100644 index 85d5f0db..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_false_test.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !go1.5 - -package network - -const isGo15 = false diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_true_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_true_test.go deleted file mode 100644 index 953836de..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_true_test.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build go1.5 - -package network - -const isGo15 = true diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/packet.go b/vendor/github.com/coreos/coreos-cloudinit/network/packet.go deleted file mode 100644 index a2834ff9..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/packet.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package network
-
-import (
-	"net"
-
-	"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
-)
-
-func ProcessPacketNetconf(netdata packet.NetworkData) ([]InterfaceGenerator, error) {
-	var nameservers []net.IP
-	if netdata.DNS != nil {
-		nameservers = netdata.DNS
-	} else {
-		nameservers = append(nameservers, net.ParseIP("8.8.8.8"), net.ParseIP("8.8.4.4"))
-	}
-
-	generators, err := parseNetwork(netdata, nameservers)
-	if err != nil {
-		return nil, err
-	}
-
-	return generators, nil
-}
-
-func parseNetwork(netdata packet.NetworkData, nameservers []net.IP) ([]InterfaceGenerator, error) {
-	var interfaces []InterfaceGenerator
-	var addresses []net.IPNet
-	var routes []route
-	for _, netblock := range netdata.Netblocks {
-		addresses = append(addresses, net.IPNet{
-			IP:   netblock.Address,
-			Mask: net.IPMask(netblock.Netmask),
-		})
-		if netblock.Public == false {
-			routes = append(routes, route{
-				destination: net.IPNet{
-					IP:   net.IPv4(10, 0, 0, 0),
-					Mask: net.IPv4Mask(255, 0, 0, 0),
-				},
-				gateway: netblock.Gateway,
-			})
-		} else {
-			if netblock.AddressFamily == 4 {
-				routes = append(routes, route{
-					destination: net.IPNet{
-						IP:   net.IPv4zero,
-						Mask: net.IPMask(net.IPv4zero),
-					},
-					gateway: netblock.Gateway,
-				})
-			} else {
-				routes = append(routes, route{
-					destination: net.IPNet{
-						IP:   net.IPv6zero,
-						Mask: net.IPMask(net.IPv6zero),
-					},
-					gateway: netblock.Gateway,
-				})
-			}
-		}
-	}
-
-	bond := bondInterface{
-		logicalInterface: logicalInterface{
-			name: "bond0",
-			config: configMethodStatic{
-				addresses:   addresses,
-				nameservers: nameservers,
-				routes:      routes,
-			},
-		},
-		options: map[string]string{
-			"Mode":             "802.3ad",
-			"LACPTransmitRate": "fast",
-			"MIIMonitorSec":    ".2",
-			"UpDelaySec":       ".2",
-			"DownDelaySec":     ".2",
-		},
-	}
-
-	bond.hwaddr, _ = net.ParseMAC(netdata.Interfaces[0].Mac)
-
-	for index, iface := range netdata.Interfaces {
-		bond.slaves = append(bond.slaves, iface.Name)
-
-		interfaces = append(interfaces, &physicalInterface{
-			logicalInterface: logicalInterface{
-				name: iface.Name,
-				config: configMethodStatic{
-					nameservers: nameservers,
-				},
-				children:    []networkInterface{&bond},
-				configDepth: index,
-			},
-		})
-	}
-
-	interfaces = append(interfaces, &bond)
-
-	return interfaces, nil
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go b/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go
deleted file mode 100644
index 88bca813..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package network
-
-import (
-	"fmt"
-	"net"
-	"strconv"
-	"strings"
-)
-
-type stanza interface{}
-
-type stanzaAuto struct {
-	interfaces []string
-}
-
-type stanzaInterface struct {
-	name         string
-	kind         interfaceKind
-	auto         bool
-	configMethod configMethod
-	options      map[string][]string
-}
-
-type interfaceKind int
-
-const (
-	interfaceBond = interfaceKind(iota)
-	interfacePhysical
-	interfaceVLAN
-)
-
-type route struct {
-	destination net.IPNet
-	gateway     net.IP
-}
-
-type configMethod interface{}
-
-type configMethodStatic struct {
-	addresses   []net.IPNet
-	nameservers []net.IP
-	routes      []route
-	hwaddress   net.HardwareAddr
-}
-
-type configMethodLoopback struct{}
-
-type configMethodManual struct{}
-
-type configMethodDHCP struct {
-	hwaddress net.HardwareAddr
-}
-
-func parseStanzas(lines []string) (stanzas []stanza, err error) {
-	rawStanzas, err := splitStanzas(lines)
-	if err != nil {
-		return nil, err
-	}
-
-	stanzas = make([]stanza, 0, len(rawStanzas))
-	for _, rawStanza := range rawStanzas {
-		if stanza, err := parseStanza(rawStanza); err == nil {
-			stanzas = append(stanzas, stanza)
-		} else {
-			return nil, err
-		}
-	}
-
-	autos := make([]string, 0)
-	interfaceMap := make(map[string]*stanzaInterface)
-	for _, stanza := range stanzas {
-		switch c := stanza.(type) {
-		case *stanzaAuto:
-			autos = append(autos, c.interfaces...)
-		case *stanzaInterface:
-			interfaceMap[c.name] = c
-		}
-	}
-
-	// Apply the auto attribute
-	for _, auto := range autos {
-		if iface, ok := interfaceMap[auto]; ok {
-			iface.auto = true
-		}
-	}
-
-	return stanzas, nil
-}
-
-func splitStanzas(lines []string) ([][]string, error) {
-	var curStanza []string
-	stanzas := make([][]string, 0)
-	for _, line := range lines {
-		if isStanzaStart(line) {
-			if curStanza != nil {
-				stanzas = append(stanzas, curStanza)
-			}
-			curStanza = []string{line}
-		} else if curStanza != nil {
-			curStanza = append(curStanza, line)
-		} else {
-			return nil, fmt.Errorf("missing stanza start %q", line)
-		}
-	}
-
-	if curStanza != nil {
-		stanzas = append(stanzas, curStanza)
-	}
-
-	return stanzas, nil
-}
-
-func isStanzaStart(line string) bool {
-	switch strings.Split(line, " ")[0] {
-	case "auto":
-		fallthrough
-	case "iface":
-		fallthrough
-	case "mapping":
-		return true
-	}
-
-	if strings.HasPrefix(line, "allow-") {
-		return true
-	}
-
-	return false
-}
-
-func parseStanza(rawStanza []string) (stanza, error) {
-	if len(rawStanza) == 0 {
-		panic("empty stanza")
-	}
-	tokens := strings.Fields(rawStanza[0])
-	if len(tokens) < 2 {
-		return nil, fmt.Errorf("malformed stanza start %q", rawStanza[0])
-	}
-
-	kind := tokens[0]
-	attributes := tokens[1:]
-
-	switch kind {
-	case "auto":
-		return parseAutoStanza(attributes, rawStanza[1:])
-	case "iface":
-		return parseInterfaceStanza(attributes, rawStanza[1:])
-	default:
-		return nil, fmt.Errorf("unknown stanza %q", kind)
-	}
-}
-
-func parseAutoStanza(attributes []string, options []string) (*stanzaAuto, error) {
-	return &stanzaAuto{interfaces: attributes}, nil
-}
-
-func parseInterfaceStanza(attributes []string, options []string) (*stanzaInterface, error) {
-	if len(attributes) != 3 {
-		return nil, fmt.Errorf("incorrect number of attributes")
-	}
-
-	iface := attributes[0]
-	confMethod := attributes[2]
-
-	optionMap := make(map[string][]string, 0)
-	for _, option := range options {
-		if strings.HasPrefix(option, "post-up") {
-			tokens := strings.SplitAfterN(option, " ", 2)
-			if len(tokens) != 2 {
-				continue
-			}
-			if v, ok := optionMap["post-up"]; ok {
-				optionMap["post-up"] = append(v, tokens[1])
-			} else {
-				optionMap["post-up"] = []string{tokens[1]}
-			}
-		} else if strings.HasPrefix(option, "pre-down") {
-			tokens := strings.SplitAfterN(option, " ", 2)
-			if len(tokens) != 2 {
-				continue
-			}
-			if v, ok := optionMap["pre-down"]; ok {
-				optionMap["pre-down"] = append(v, tokens[1])
-			} else {
-				optionMap["pre-down"] = []string{tokens[1]}
-			}
-		} else {
-			tokens := strings.Fields(option)
-			optionMap[tokens[0]] = tokens[1:]
-		}
-	}
-
-	var conf configMethod
-	switch confMethod {
-	case "static":
-		config := configMethodStatic{
-			addresses:   make([]net.IPNet, 1),
-			routes:      make([]route, 0),
-			nameservers: make([]net.IP, 0),
-		}
-		if addresses, ok := optionMap["address"]; ok {
-			if len(addresses) == 1 {
-				config.addresses[0].IP = net.ParseIP(addresses[0])
-			}
-		}
-		if netmasks, ok := optionMap["netmask"]; ok {
-			if len(netmasks) == 1 {
-				config.addresses[0].Mask = net.IPMask(net.ParseIP(netmasks[0]).To4())
-			}
-		}
-		if config.addresses[0].IP == nil || config.addresses[0].Mask == nil {
-			return nil, fmt.Errorf("malformed static network config for %q", iface)
-		}
-		if gateways, ok := optionMap["gateway"]; ok {
-			if len(gateways) == 1 {
-				config.routes = append(config.routes, route{
-					destination: net.IPNet{
-						IP:   net.IPv4(0, 0, 0, 0),
-						Mask: net.IPv4Mask(0, 0, 0, 0),
-					},
-					gateway: net.ParseIP(gateways[0]),
-				})
-			}
-		}
-		if hwaddress, err := parseHwaddress(optionMap, iface); err == nil {
-			config.hwaddress = hwaddress
-		} else {
-			return nil, err
-		}
-		for _, nameserver := range optionMap["dns-nameservers"] {
-			config.nameservers = append(config.nameservers, net.ParseIP(nameserver))
-		}
-		for _, postup := range optionMap["post-up"] {
-			if strings.HasPrefix(postup, "route add") {
-				route := route{}
-				fields := strings.Fields(postup)
-				for i, field := range fields[:len(fields)-1] {
-					switch field {
-					case "-net":
-						if _, dst, err := net.ParseCIDR(fields[i+1]); err == nil {
-							route.destination = *dst
-						} else {
-							route.destination.IP = net.ParseIP(fields[i+1])
-						}
-					case "netmask":
-						route.destination.Mask = net.IPMask(net.ParseIP(fields[i+1]).To4())
-					case "gw":
-						route.gateway = net.ParseIP(fields[i+1])
-					}
-				}
-				if route.destination.IP != nil && route.destination.Mask != nil && route.gateway != nil {
-					config.routes = append(config.routes, route)
-				}
-			}
-		}
-		conf = config
-	case "loopback":
-		conf = configMethodLoopback{}
-	case "manual":
-		conf = configMethodManual{}
-	case "dhcp":
-		config := configMethodDHCP{}
-		if hwaddress, err := parseHwaddress(optionMap, iface); err == nil {
-			config.hwaddress = hwaddress
-		} else {
-			return nil, err
-		}
-		conf = config
-	default:
-		return nil, fmt.Errorf("invalid config method %q", confMethod)
-	}
-
-	if _, ok := optionMap["vlan_raw_device"]; ok {
-		return parseVLANStanza(iface, conf, attributes, optionMap)
-	}
-
-	if strings.Contains(iface, ".") {
-		return parseVLANStanza(iface, conf, attributes, optionMap)
-	}
-
-	if _, ok := optionMap["bond-slaves"]; ok {
-		return parseBondStanza(iface, conf, attributes, optionMap)
-	}
-
-	return parsePhysicalStanza(iface, conf, attributes, optionMap)
-}
-
-func parseHwaddress(options map[string][]string, iface string) (net.HardwareAddr, error) {
-	if hwaddress, ok := options["hwaddress"]; ok && len(hwaddress) == 2 {
-		switch hwaddress[0] {
-		case "ether":
-			if address, err := net.ParseMAC(hwaddress[1]); err == nil {
-				return address, nil
-			}
-			return nil, fmt.Errorf("malformed hwaddress option for %q", iface)
-		}
-	}
-	return nil, nil
-}
-
-func parseBondStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) {
-	return &stanzaInterface{name: iface, kind: interfaceBond, configMethod: conf, options: options}, nil
-}
-
-func parsePhysicalStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) {
-	return &stanzaInterface{name: iface, kind: interfacePhysical, configMethod: conf, options: options}, nil
-}
-
-func parseVLANStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) {
-	var id string
-	if strings.Contains(iface, ".") {
-		tokens := strings.Split(iface, ".")
-		id = tokens[len(tokens)-1]
-	} else if strings.HasPrefix(iface, "vlan") {
-		id = strings.TrimPrefix(iface, "vlan")
-	} else {
-		return nil, fmt.Errorf("malformed vlan name %q", iface)
-	}
-
-	if _, err := strconv.Atoi(id); err != nil {
-		return nil, fmt.Errorf("malformed vlan name %q", iface)
-	}
-	options["id"] = []string{id}
-	options["raw_device"] = options["vlan_raw_device"]
-
-	return &stanzaInterface{name: iface, kind: interfaceVLAN, configMethod: conf, options: options}, nil
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/stanza_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/stanza_test.go
deleted file mode 100644
index 9f476380..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/network/stanza_test.go
+++ /dev/null
@@ -1,582 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package network
-
-import (
-	"net"
-	"reflect"
-	"strings"
-	"testing"
-)
-
-func TestSplitStanzasNoParent(t *testing.T) {
-	in := []string{"test"}
-	e := "missing stanza start"
-	_, err := splitStanzas(in)
-	if err == nil || !strings.HasPrefix(err.Error(), e) {
-		t.Fatalf("bad error for splitStanzas(%q): got %q, want %q", in, err, e)
-	}
-}
-
-func TestBadParseStanzas(t *testing.T) {
-	for in, e := range map[string]string{
-		"":                 "missing stanza start",
-		"iface":            "malformed stanza start",
-		"allow-?? unknown": "unknown stanza",
-	} {
-		_, err := parseStanzas([]string{in})
-		if err == nil || !strings.HasPrefix(err.Error(), e) {
-			t.Fatalf("bad error for parseStanzas(%q): got %q, want %q", in, err, e)
-		}
-
-	}
-}
-
-func TestBadParseInterfaceStanza(t *testing.T) {
-	for _, tt := range []struct {
-		in   []string
-		opts []string
-		e    string
-	}{
-		{[]string{}, nil, "incorrect number of attributes"},
-		{[]string{"eth", "inet", "invalid"}, nil, "invalid config method"},
-		{[]string{"eth", "inet", "static"}, []string{"address 192.168.1.100"}, "malformed static network config"},
-		{[]string{"eth", "inet", "static"}, []string{"netmask 255.255.255.0"}, "malformed static network config"},
-		{[]string{"eth", "inet", "static"}, []string{"address invalid", "netmask 255.255.255.0"}, "malformed static network config"},
-		{[]string{"eth", "inet", "static"}, []string{"address 192.168.1.100", "netmask invalid"}, "malformed static network config"},
-		{[]string{"eth", "inet", "static"}, []string{"address 192.168.1.100", "netmask 255.255.255.0", "hwaddress ether NotAnAddress"}, "malformed hwaddress option"},
-		{[]string{"eth", "inet", "dhcp"}, []string{"hwaddress ether NotAnAddress"}, "malformed hwaddress option"},
-	} {
-		_, err := parseInterfaceStanza(tt.in, tt.opts)
-		if err == nil || !strings.HasPrefix(err.Error(), tt.e) {
-			t.Fatalf("bad error parsing interface stanza %q: got %q, want %q", tt.in, err.Error(), tt.e)
-		}
-	}
-}
-
-func TestBadParseVLANStanzas(t *testing.T) {
-	conf := configMethodManual{}
-	options := map[string][]string{}
-	for _, in := range []string{"myvlan", "eth.vlan"} {
-		_, err := parseVLANStanza(in, conf, nil, options)
-		if err == nil || !strings.HasPrefix(err.Error(), "malformed vlan name") {
-			t.Fatalf("did not error on bad vlan %q", in)
-		}
-	}
-}
-
-func TestSplitStanzas(t *testing.T) {
-	expect := [][]string{
-		{"auto lo"},
-		{"iface eth1", "option: 1"},
-		{"mapping"},
-		{"allow-"},
-	}
-	lines := make([]string, 0, 5)
-	for _, stanza := range expect {
-		for _, line := range stanza {
-			lines = append(lines, line)
-		}
-	}
-
-	stanzas, err := splitStanzas(lines)
-	if err != nil {
-		t.FailNow()
-	}
-	for i, stanza := range stanzas {
-		if len(stanza) != len(expect[i]) {
-			t.FailNow()
-		}
-		for j, line := range stanza {
-			if line != expect[i][j] {
-				t.FailNow()
-			}
-		}
-	}
-}
-
-func TestParseStanzaNil(t *testing.T) {
-	defer func() {
-		if r := recover(); r == nil {
-			t.Fatal("parseStanza(nil) did not panic")
-		}
-	}()
-	parseStanza(nil)
-}
-
-func TestParseStanzaSuccess(t *testing.T) {
-	for _, in := range []string{
-		"auto a",
-		"iface a inet manual",
-	} {
-		if _, err := parseStanza([]string{in}); err != nil {
-			t.Fatalf("unexpected error parsing stanza %q: %s", in, err)
-		}
-	}
-}
-
-func TestParseAutoStanza(t *testing.T) {
-	interfaces := []string{"test", "attribute"}
-	stanza, err := parseAutoStanza(interfaces, nil)
-	if err != nil {
-		t.Fatalf("unexpected error parsing auto stanza %q: %s", interfaces, err)
-	}
-	if !reflect.DeepEqual(stanza.interfaces, interfaces) {
-		t.FailNow()
-	}
-}
-
-func TestParseBondStanzaNoSlaves(t *testing.T) {
-	bond, err := parseBondStanza("", nil, nil, map[string][]string{})
-	if err != nil {
-		t.FailNow()
-	}
-	if bond.options["bond-slaves"] != nil {
-		t.FailNow()
-	}
-}
-
-func TestParseBondStanza(t *testing.T) {
-	conf := configMethodManual{}
-	options := map[string][]string{
-		"bond-slaves": []string{"1", "2"},
-	}
-	bond, err := parseBondStanza("test", conf, nil, options)
-	if err != nil {
-		t.FailNow()
-	}
-	if bond.name != "test" {
-		t.FailNow()
-	}
-	if bond.kind != interfaceBond {
-		t.FailNow()
-	}
-	if bond.configMethod != conf {
-		t.FailNow()
-	}
-}
-
-func TestParsePhysicalStanza(t *testing.T) {
-	conf := configMethodManual{}
-	options := map[string][]string{
-		"a": []string{"1", "2"},
-		"b": []string{"1"},
-	}
-	physical, err := parsePhysicalStanza("test", conf, nil, options)
-	if err != nil {
-		t.FailNow()
-	}
-	if physical.name != "test" {
-		t.FailNow()
-	}
-	if physical.kind != interfacePhysical {
-		t.FailNow()
-	}
-	if physical.configMethod != conf {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(physical.options, options) {
-		t.FailNow()
-	}
-}
-
-func TestParseVLANStanzas(t *testing.T) {
-	conf := configMethodManual{}
-	options := map[string][]string{}
-	for _, in := range []string{"vlan25", "eth.25"} {
-		vlan, err := parseVLANStanza(in, conf, nil, options)
-		if err != nil {
-			t.Fatalf("unexpected error from parseVLANStanza(%q): %s", in, err)
-		}
-		if !reflect.DeepEqual(vlan.options["id"], []string{"25"}) {
-			t.FailNow()
-		}
-	}
-}
-
-func TestParseInterfaceStanzaStaticAddress(t *testing.T) {
-	options := []string{"address 192.168.1.100", "netmask 255.255.255.0"}
-	expect := []net.IPNet{
-		{
-			IP:   net.IPv4(192, 168, 1, 100),
-			Mask: net.IPv4Mask(255, 255, 255, 0),
-		},
-	}
-
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
-	if err != nil {
-		t.FailNow()
-	}
-	static, ok := iface.configMethod.(configMethodStatic)
-	if !ok {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(static.addresses, expect) {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaStaticGateway(t *testing.T) {
-	options := []string{"address 192.168.1.100", "netmask 255.255.255.0", "gateway 192.168.1.1"}
-	expect := []route{
-		{
-			destination: net.IPNet{
-				IP:   net.IPv4(0, 0, 0, 0),
-				Mask: net.IPv4Mask(0, 0, 0, 0),
-			},
-			gateway: net.IPv4(192, 168, 1, 1),
-		},
-	}
-
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
-	if err != nil {
-		t.FailNow()
-	}
-	static, ok := iface.configMethod.(configMethodStatic)
-	if !ok {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(static.routes, expect) {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaStaticDNS(t *testing.T) {
-	options := []string{"address 192.168.1.100", "netmask 255.255.255.0", "dns-nameservers 192.168.1.10 192.168.1.11 192.168.1.12"}
-	expect := []net.IP{
-		net.IPv4(192, 168, 1, 10),
-		net.IPv4(192, 168, 1, 11),
-		net.IPv4(192, 168, 1, 12),
-	}
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
-	if err != nil {
-		t.FailNow()
-	}
-	static, ok := iface.configMethod.(configMethodStatic)
-	if !ok {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(static.nameservers, expect) {
-		t.FailNow()
-	}
-}
-
-func TestBadParseInterfaceStanzasStaticPostUp(t *testing.T) {
-	for _, in := range []string{
-		"post-up invalid",
-		"post-up route add",
-		"post-up route add -net",
-		"post-up route add gw",
-		"post-up route add netmask",
-		"gateway",
-		"gateway 192.168.1.1 192.168.1.2",
-	} {
-		options := []string{"address 192.168.1.100", "netmask 255.255.255.0", in}
-		iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
-		if err != nil {
-			t.Fatalf("parseInterfaceStanza with options %s got unexpected error", options)
-		}
-		static, ok := iface.configMethod.(configMethodStatic)
-		if !ok {
-			t.Fatalf("parseInterfaceStanza with options %s did not return configMethodStatic", options)
-		}
-		if len(static.routes) != 0 {
-			t.Fatalf("parseInterfaceStanza with options %s did not return zero-length static routes", options)
-		}
-	}
-}
-
-func TestParseInterfaceStanzaStaticPostUp(t *testing.T) {
-	for _, tt := range []struct {
-		options []string
-		expect  []route
-	}{
-		{
-			options: []string{
-				"address 192.168.1.100",
-				"netmask 255.255.255.0",
-				"post-up route add gw 192.168.1.1 -net 192.168.1.0 netmask 255.255.255.0",
-			},
-			expect: []route{
-				{
-					destination: net.IPNet{
-						IP:   net.IPv4(192, 168, 1, 0),
-						Mask: net.IPv4Mask(255, 255, 255, 0),
-					},
-					gateway: net.IPv4(192, 168, 1, 1),
-				},
-			},
-		},
-		{
-			options: []string{
-				"address 192.168.1.100",
-				"netmask 255.255.255.0",
-				"post-up route add gw 192.168.1.1 -net 192.168.1.0/24 || true",
-			},
-			expect: []route{
-				{
-					destination: func() net.IPNet {
-						if _, net, err := net.ParseCIDR("192.168.1.0/24"); err == nil {
-							return *net
-						} else {
-							panic(err)
-						}
-					}(),
-					gateway: net.IPv4(192, 168, 1, 1),
-				},
-			},
-		},
-	} {
-		iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, tt.options)
-		if err != nil {
-			t.Fatalf("bad error (%+v): want nil, got %s\n", tt, err)
-		}
-		static, ok := iface.configMethod.(configMethodStatic)
-		if !ok {
-			t.Fatalf("bad config method (%+v): want configMethodStatic, got %T\n", tt, iface.configMethod)
-		}
-		if !reflect.DeepEqual(static.routes, tt.expect) {
-			t.Fatalf("bad routes (%+v): want %#v, got %#v\n", tt, tt.expect, static.routes)
-		}
-	}
-}
-
-func TestParseInterfaceStanzaLoopback(t *testing.T) {
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "loopback"}, nil)
-	if err != nil {
-		t.FailNow()
-	}
-	if _, ok := iface.configMethod.(configMethodLoopback); !ok {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaManual(t *testing.T) {
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, nil)
-	if err != nil {
-		t.FailNow()
-	}
-	if _, ok := iface.configMethod.(configMethodManual); !ok {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaDHCP(t *testing.T) {
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "dhcp"}, nil)
-	if err != nil {
-		t.FailNow()
-	}
-	if _, ok := iface.configMethod.(configMethodDHCP); !ok {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaPostUpOption(t *testing.T) {
-	options := []string{
-		"post-up",
-		"post-up 1 2",
-		"post-up 3 4",
-	}
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
-	if err != nil {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(iface.options["post-up"], []string{"1 2", "3 4"}) {
-		t.Log(iface.options["post-up"])
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaPreDownOption(t *testing.T) {
-	options := []string{
-		"pre-down",
-		"pre-down 3",
-		"pre-down 4",
-	}
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
-	if err != nil {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(iface.options["pre-down"], []string{"3", "4"}) {
-		t.Log(iface.options["pre-down"])
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaEmptyOption(t *testing.T) {
-	options := []string{
-		"test",
-	}
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
-	if err != nil {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(iface.options["test"], []string{}) {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaOptions(t *testing.T) {
-	options := []string{
-		"test1 1",
-		"test2 2 3",
-		"test1 5 6",
-	}
-	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
-	if err != nil {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(iface.options["test1"], []string{"5", "6"}) {
-		t.Log(iface.options["test1"])
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(iface.options["test2"], []string{"2", "3"}) {
-		t.Log(iface.options["test2"])
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaHwaddress(t *testing.T) {
-	for _, tt := range []struct {
-		attr []string
-		opt  []string
-		hw   net.HardwareAddr
-	}{
-		{
-			[]string{"mybond", "inet", "dhcp"},
-			[]string{},
-			nil,
-		},
-		{
-			[]string{"mybond", "inet", "dhcp"},
-			[]string{"hwaddress ether 00:01:02:03:04:05"},
-			net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5}),
-		},
-		{
-			[]string{"mybond", "inet", "static"},
-			[]string{"hwaddress ether 00:01:02:03:04:05", "address 192.168.1.100", "netmask 255.255.255.0"},
-			net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5}),
-		},
-	} {
-		iface, err := parseInterfaceStanza(tt.attr, tt.opt)
-		if err != nil {
-			t.Fatalf("error in parseInterfaceStanza (%q, %q): %q", tt.attr, tt.opt, err)
-		}
-		switch c := iface.configMethod.(type) {
-		case configMethodStatic:
-			if !reflect.DeepEqual(c.hwaddress, tt.hw) {
-				t.Fatalf("bad hwaddress (%q, %q): got %q, want %q", tt.attr, tt.opt, c.hwaddress, tt.hw)
-			}
-		case configMethodDHCP:
-			if !reflect.DeepEqual(c.hwaddress, tt.hw) {
-				t.Fatalf("bad hwaddress (%q, %q): got %q, want %q", tt.attr, tt.opt, c.hwaddress, tt.hw)
-			}
-		}
-	}
-}
-
-func TestParseInterfaceStanzaBond(t *testing.T) {
-	iface, err := parseInterfaceStanza([]string{"mybond", "inet", "manual"}, []string{"bond-slaves eth"})
-	if err != nil {
-		t.FailNow()
-	}
-	if iface.kind != interfaceBond {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaVLANName(t *testing.T) {
-	iface, err := parseInterfaceStanza([]string{"eth0.1", "inet", "manual"}, nil)
-	if err != nil {
-		t.FailNow()
-	}
-	if iface.kind != interfaceVLAN {
-		t.FailNow()
-	}
-}
-
-func TestParseInterfaceStanzaVLANOption(t *testing.T) {
-	iface, err := parseInterfaceStanza([]string{"vlan1", "inet", "manual"}, []string{"vlan_raw_device eth"})
-	if err != nil {
-		t.FailNow()
-	}
-	if iface.kind != interfaceVLAN {
-		t.FailNow()
-	}
-}
-
-func TestParseStanzasNone(t *testing.T) {
-	stanzas, err := parseStanzas(nil)
-	if err != nil {
-		t.FailNow()
-	}
-	if len(stanzas) != 0 {
-		t.FailNow()
-	}
-}
-
-func TestParseStanzas(t *testing.T) {
-	lines := []string{
-		"auto lo",
-		"iface lo inet loopback",
-		"iface eth1 inet manual",
-		"iface eth2 inet manual",
-		"iface eth3 inet manual",
-		"auto eth1 eth3",
-	}
-	expect := []stanza{
-		&stanzaAuto{
-			interfaces: []string{"lo"},
-		},
-		&stanzaInterface{
-			name:         "lo",
-			kind:         interfacePhysical,
-			auto:         true,
-			configMethod: configMethodLoopback{},
-			options:      map[string][]string{},
-		},
-		&stanzaInterface{
-			name:         "eth1",
-			kind:         interfacePhysical,
-			auto:         true,
-			configMethod: configMethodManual{},
-			options:      map[string][]string{},
-		},
-		&stanzaInterface{
-			name:         "eth2",
-			kind:         interfacePhysical,
-			auto:         false,
-			configMethod: configMethodManual{},
-			options:      map[string][]string{},
-		},
-		&stanzaInterface{
-			name:         "eth3",
-			kind:         interfacePhysical,
-			auto:         true,
-			configMethod: configMethodManual{},
-			options:      map[string][]string{},
-		},
-		&stanzaAuto{
-			interfaces: []string{"eth1", "eth3"},
-		},
-	}
-	stanzas, err := parseStanzas(lines)
-	if err != err {
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(stanzas, expect) {
-		t.FailNow()
-	}
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go b/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go
deleted file mode 100644
index 230be42a..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package network
-
-import (
-	"fmt"
-	"log"
-	"net"
-)
-
-func ProcessVMwareNetconf(config map[string]string) ([]InterfaceGenerator, error) {
-	log.Println("Processing VMware network config")
-
-	log.Println("Parsing nameservers")
-	var nameservers []net.IP
-	for i := 0; ; i++ {
-		if ipStr, ok := config[fmt.Sprintf("dns.server.%d", i)]; ok {
-			if ip := net.ParseIP(ipStr); ip != nil {
-				nameservers = append(nameservers, ip)
-			} else {
-				return nil, fmt.Errorf("invalid nameserver: %q", ipStr)
-			}
-		} else {
-			break
-		}
-	}
-	log.Printf("Parsed %d nameservers", len(nameservers))
-
-	var interfaces []InterfaceGenerator
-	for i := 0; ; i++ {
-		var addresses []net.IPNet
-		var routes []route
-		var err error
-		var dhcp bool
-		iface := &physicalInterface{}
-
-		log.Printf("Proccessing interface %d", i)
-
-		log.Println("Processing DHCP")
-		if dhcp, err = processDHCPConfig(config, fmt.Sprintf("interface.%d.", i)); err != nil {
-			return nil, err
-		}
-
-		log.Println("Processing addresses")
-		if as, err := processAddressConfig(config, fmt.Sprintf("interface.%d.", i)); err == nil {
-			addresses = append(addresses, as...)
-		} else {
-			return nil, err
-		}
-
-		log.Println("Processing routes")
-		if rs, err := processRouteConfig(config, fmt.Sprintf("interface.%d.", i)); err == nil {
-			routes = append(routes, rs...)
-		} else {
-			return nil, err
-		}
-
-		if mac, ok := config[fmt.Sprintf("interface.%d.mac", i)]; ok {
-			log.Printf("Parsing interface %d MAC address: %q", i, mac)
-			if hwaddr, err := net.ParseMAC(mac); err == nil {
-				iface.hwaddr = hwaddr
-			} else {
-				return nil, fmt.Errorf("error while parsing MAC address: %v", err)
-			}
-		}
-
-		if name, ok := config[fmt.Sprintf("interface.%d.name", i)]; ok {
-			log.Printf("Parsing interface %d name: %q", i, name)
-			iface.name = name
-		}
-
-		if len(addresses) > 0 || len(routes) > 0 {
-			iface.config = configMethodStatic{
-				hwaddress:   iface.hwaddr,
-				addresses:   addresses,
-				nameservers: nameservers,
-				routes:      routes,
-			}
-		} else if dhcp {
-			iface.config = configMethodDHCP{
-				hwaddress: iface.hwaddr,
-			}
-		} else {
-			break
-		}
-
-		interfaces = append(interfaces, iface)
-	}
-
-	return interfaces, nil
-}
-
-func processAddressConfig(config map[string]string, prefix string) (addresses []net.IPNet, err error) {
-	for a := 0; ; a++ {
-		prefix := fmt.Sprintf("%sip.%d.", prefix, a)
-
-		addressStr, ok := config[prefix+"address"]
-		if !ok {
-			break
-		}
-
-		ip, network, err := net.ParseCIDR(addressStr)
-		if err != nil {
-			return nil, fmt.Errorf("invalid address: %q", addressStr)
-		}
-		addresses = append(addresses, net.IPNet{
-			IP:   ip,
-			Mask: network.Mask,
-		})
-	}
-
-	return
-}
-
-func processRouteConfig(config map[string]string, prefix string) (routes []route, err error) {
-	for r := 0; ; r++ {
-		prefix := fmt.Sprintf("%sroute.%d.", prefix, r)
-
-		gatewayStr, gok := config[prefix+"gateway"]
-		destinationStr, dok := config[prefix+"destination"]
-		if gok && !dok {
-			return nil, fmt.Errorf("missing destination key")
-		} else if !gok && dok {
-			return nil, fmt.Errorf("missing gateway key")
-		} else if !gok && !dok {
-			break
-		}
-
-		gateway := net.ParseIP(gatewayStr)
-		if gateway == nil {
-			return nil, fmt.Errorf("invalid gateway: %q", gatewayStr)
-		}
-
-		_, destination, err := net.ParseCIDR(destinationStr)
-		if err != nil {
-			return nil, err
-		}
-
-		routes = append(routes, route{
-			destination: *destination,
-			gateway:     gateway,
-		})
-	}
-
-	return
}
-
-func processDHCPConfig(config map[string]string, prefix string) (dhcp bool, err error) {
-	dhcpStr, ok := config[prefix+"dhcp"]
-	if !ok {
-		return false, nil
-	}
-
-	switch dhcpStr {
-	case "yes":
-		return true, nil
-	case "no":
-		return false, nil
-	default:
-		return false, fmt.Errorf("invalid DHCP option: %q", dhcpStr)
-	}
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/vmware_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/vmware_test.go
deleted file mode 100644
index 010568c9..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/network/vmware_test.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package network
-
-import (
-	"errors"
-	"net"
-	"reflect"
-	"testing"
-)
-
-func mustParseMac(mac net.HardwareAddr, err error) net.HardwareAddr {
-	if err != nil {
-		panic(err)
-	}
-	return mac
-}
-
-func TestProcessVMwareNetconf(t *testing.T) {
-	tests := []struct {
-		config map[string]string
-
-		interfaces []InterfaceGenerator
-		err        error
-	}{
-		{},
-		{
-			config: map[string]string{
-				"interface.0.dhcp": "yes",
-			},
-			interfaces: []InterfaceGenerator{
-				&physicalInterface{logicalInterface{
-					config: configMethodDHCP{},
-				}},
-			},
-		},
-		{
-			config: map[string]string{
-				"interface.0.mac":  "00:11:22:33:44:55",
-				"interface.0.dhcp": "yes",
-			},
-			interfaces: []InterfaceGenerator{
-				&physicalInterface{logicalInterface{
-					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
-					config: configMethodDHCP{hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:55"))},
-				}},
-			},
-		},
-		{
-			config: map[string]string{
-				"interface.0.name": "eth0",
-				"interface.0.dhcp": "yes",
-			},
-			interfaces: []InterfaceGenerator{
-				&physicalInterface{logicalInterface{
-					name:   "eth0",
-					config: configMethodDHCP{},
-				}},
-			},
-		},
-		{
-			config: map[string]string{
-				"interface.0.mac":                 "00:11:22:33:44:55",
-				"interface.0.ip.0.address":        "10.0.0.100/24",
-				"interface.0.route.0.gateway":     "10.0.0.1",
-				"interface.0.route.0.destination": "0.0.0.0/0",
-			},
-			interfaces: []InterfaceGenerator{
-				&physicalInterface{logicalInterface{
-					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
-					config: configMethodStatic{
-						hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
-						addresses: []net.IPNet{net.IPNet{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
-						// I realize how upset you must be that I am shoving an IPMask into an IP. This is because net.IPv4zero is
-						// actually a magic IPv6 address which ruins our equality check. What's that? Just use IP::Equal()? I'd rather
-						// DeepEqual just handle that for me, but until Go gets operator overloading, we are stuck with this.
-						routes: []route{route{
-							destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)},
-							gateway:     net.ParseIP("10.0.0.1")},
-						},
-					},
-				}},
-			},
-		},
-		{
-			config: map[string]string{
-				"dns.server.0":                    "1.2.3.4",
-				"dns.server.1":                    "5.6.7.8",
-				"interface.0.mac":                 "00:11:22:33:44:55",
-				"interface.0.ip.0.address":        "10.0.0.100/24",
-				"interface.0.ip.1.address":        "10.0.0.101/24",
-				"interface.0.route.0.gateway":     "10.0.0.1",
-				"interface.0.route.0.destination": "0.0.0.0/0",
-				"interface.1.name":                "eth0",
-				"interface.1.ip.0.address":        "10.0.1.100/24",
-				"interface.1.route.0.gateway":     "10.0.1.1",
-				"interface.1.route.0.destination": "0.0.0.0/0",
-				"interface.2.dhcp":                "yes",
-				"interface.2.mac":                 "00:11:22:33:44:77",
-			},
-			interfaces: []InterfaceGenerator{
-				&physicalInterface{logicalInterface{
-					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
-					config: configMethodStatic{
-						hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
-						addresses: []net.IPNet{
-							net.IPNet{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)},
-							net.IPNet{IP: net.ParseIP("10.0.0.101"), Mask: net.CIDRMask(24, net.IPv4len*8)},
-						},
-						routes: []route{route{
-							destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)},
-							gateway:     net.ParseIP("10.0.0.1")},
-						},
-						nameservers: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("5.6.7.8")},
-					},
-				}},
-				&physicalInterface{logicalInterface{
-					name: "eth0",
-					config: configMethodStatic{
-						addresses: []net.IPNet{net.IPNet{IP: net.ParseIP("10.0.1.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
-						routes: []route{route{
-							destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)},
-							gateway:     net.ParseIP("10.0.1.1")},
-						},
-						nameservers: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("5.6.7.8")},
-					},
-				}},
-				&physicalInterface{logicalInterface{
-					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:77")),
-					config: configMethodDHCP{hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:77"))},
-				}},
-			},
-		},
-		{
-			config: map[string]string{"dns.server.0": "test dns"},
-			err:    errors.New(`invalid nameserver: "test dns"`),
-		},
-	}
-
-	for i, tt := range tests {
-		interfaces, err := ProcessVMwareNetconf(tt.config)
-		if !reflect.DeepEqual(tt.err, err) {
-			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
-		}
-		if !reflect.DeepEqual(tt.interfaces, interfaces) {
-			t.Errorf("bad interfaces (#%d): want %#v, got %#v", i, tt.interfaces, interfaces)
-			for _, iface := range tt.interfaces {
-				t.Logf(" want: %#v", iface)
-			}
-			for _, iface := range interfaces {
-				t.Logf(" got: %#v", iface)
-			}
-		}
-	}
-}
-
-func TestProcessAddressConfig(t *testing.T) {
-	tests := []struct {
-		config map[string]string
-		prefix string
-
-		addresses []net.IPNet
-		err       error
-	}{
-		{},
-
-		// static - ipv4
-		{
-			config: map[string]string{
-				"ip.0.address": "10.0.0.100/24",
-			},
-
-			addresses: []net.IPNet{{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
-		},
-		{
-			config: map[string]string{
-				"this.is.a.prefix.ip.0.address": "10.0.0.100/24",
-			},
-			prefix: "this.is.a.prefix.",
-
-			addresses: []net.IPNet{{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
-		},
-		{
-			config: map[string]string{
-				"ip.0.address": "10.0.0.100/24",
-				"ip.1.address": "10.0.0.101/24",
-				"ip.2.address": "10.0.0.102/24",
-			},
-
-			addresses: []net.IPNet{
-				{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)},
-				{IP: net.ParseIP("10.0.0.101"), Mask: net.CIDRMask(24, net.IPv4len*8)},
-				{IP: net.ParseIP("10.0.0.102"), Mask: net.CIDRMask(24, net.IPv4len*8)},
-			},
-		},
-
-		// static - ipv6
-		{
-			config: map[string]string{
-				"ip.0.address": "fe00::100/64",
-			},
-
-			addresses: []net.IPNet{{IP: net.ParseIP("fe00::100"), Mask: net.IPMask(net.CIDRMask(64, net.IPv6len*8))}},
-		},
-		{
-			config: map[string]string{
-				"ip.0.address": "fe00::100/64",
-				"ip.1.address": "fe00::101/64",
-				"ip.2.address": "fe00::102/64",
-			},
-
-			addresses: []net.IPNet{
-				{IP: net.ParseIP("fe00::100"), Mask: net.CIDRMask(64, net.IPv6len*8)},
-				{IP: net.ParseIP("fe00::101"), Mask: net.CIDRMask(64, net.IPv6len*8)},
-				{IP: net.ParseIP("fe00::102"), Mask: net.CIDRMask(64, net.IPv6len*8)},
-			},
-		},
-
-		// invalid
-		{
-			config: map[string]string{
-				"ip.0.address": "test address",
-			},
-
-			err: errors.New(`invalid address: "test address"`),
-		},
-	}
-
-	for i, tt := range tests {
-		addresses, err := processAddressConfig(tt.config, tt.prefix)
-		if !reflect.DeepEqual(tt.err, err) {
-			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
-		}
-		if err != nil {
-			continue
-		}
-
-		if !reflect.DeepEqual(tt.addresses, addresses) {
-			t.Errorf("bad addresses (#%d): want %#v, got %#v", i, tt.addresses, addresses)
-		}
-	}
-}
-
-func TestProcessRouteConfig(t *testing.T) {
-	tests := []struct {
-		config map[string]string
-		prefix string
-
-		routes []route
-		err    error
-	}{
-		{},
-
-		{
-			config: map[string]string{
-				"route.0.gateway":     "10.0.0.1",
-				"route.0.destination": "0.0.0.0/0",
-			},
-
-			routes: []route{{destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)}, gateway: net.ParseIP("10.0.0.1")}},
-		},
-		{
-			config: map[string]string{
-				"this.is.a.prefix.route.0.gateway":     "10.0.0.1",
-				"this.is.a.prefix.route.0.destination": "0.0.0.0/0",
-			},
-			prefix: "this.is.a.prefix.",
-
-			routes: []route{{destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)}, gateway: net.ParseIP("10.0.0.1")}},
-		},
-		{
-			config: map[string]string{
-				"route.0.gateway":     "fe00::1",
-				"route.0.destination": "::/0",
-			},

-			routes: []route{{destination: net.IPNet{IP: net.IPv6zero, Mask: net.IPMask(net.IPv6zero)}, gateway: net.ParseIP("fe00::1")}},
-		},
-
-		// invalid
-		{
-			config: map[string]string{
-				"route.0.gateway":     "test gateway",
-				"route.0.destination": "0.0.0.0/0",
-			},
-
-			err: errors.New(`invalid gateway: "test gateway"`),
-		},
-		{
-			config: map[string]string{
-				"route.0.gateway":     "10.0.0.1",
-				"route.0.destination": "test destination",
-			},
-
-			err: &net.ParseError{Type: "CIDR address", Text: "test destination"},
-		},
-	}
-
-	for i, tt := range tests {
-		routes, err := processRouteConfig(tt.config, tt.prefix)
-		if !reflect.DeepEqual(tt.err, err) {
-			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
-		}
-		if err != nil {
-			continue
-		}
-
-		if !reflect.DeepEqual(tt.routes, routes) {
-			t.Errorf("bad routes (#%d): want %#v, got %#v", i, tt.routes, routes)
-		}
-	}
-}
-
-func TestProcessDHCPConfig(t *testing.T) {
-	tests := []struct {
-		config map[string]string
-		prefix string
-
-		dhcp bool
-		err  error
-	}{
-		{},
-
-		// prefix
-		{config: map[string]string{"this.is.a.prefix.mac": ""}, prefix: "this.is.a.prefix.", dhcp: false},
-		{config: map[string]string{"this.is.a.prefix.dhcp": "yes"}, prefix: "this.is.a.prefix.", dhcp: true},
-
-		// dhcp
-		{config: map[string]string{"dhcp": "yes"}, dhcp: true},
-		{config: map[string]string{"dhcp": "no"}, dhcp: false},
-
-		// invalid
-		{config: map[string]string{"dhcp": "blah"}, err: errors.New(`invalid DHCP option: "blah"`)},
-	}
-
-	for i, tt := range tests {
-		dhcp, err := processDHCPConfig(tt.config, tt.prefix)
-		if !reflect.DeepEqual(tt.err, err) {
-			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
-		}
-		if err != nil {
-			continue
-		}
-
-		if tt.dhcp != dhcp {
-			t.Errorf("bad dhcp (#%d): want %v, got %v", i, tt.dhcp, dhcp)
-		}
-	}
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client.go b/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client.go
index c4fb8032..79556a88 100644
--- a/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client.go
+++ b/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client.go
@@ -62,8 +62,8 @@ type HttpClient struct {
 	// Maximum number of connection retries. Defaults to 15
 	MaxRetries int
 
-	// Whether or not to skip TLS verification. Defaults to false
-	SkipTLS bool
+	// Headers to add to the request.
+	Header http.Header
 
 	client *http.Client
 }
@@ -74,11 +74,15 @@ type Getter interface {
 }
 
 func NewHttpClient() *HttpClient {
+	return NewHttpClientHeader(nil)
+}
+
+func NewHttpClientHeader(header http.Header) *HttpClient {
 	hc := &HttpClient{
 		InitialBackoff: 50 * time.Millisecond,
 		MaxBackoff:     time.Second * 5,
 		MaxRetries:     15,
-		SkipTLS:        false,
+		Header:         header,
 		client: &http.Client{
 			Timeout: 10 * time.Second,
 		},
@@ -139,7 +143,13 @@ func (h *HttpClient) GetRetry(rawurl string) ([]byte, error) {
 }
 
 func (h *HttpClient) Get(dataURL string) ([]byte, error) {
-	if resp, err := h.client.Get(dataURL); err == nil {
+	request, err := http.NewRequest("GET", dataURL, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	request.Header = h.Header
+	if resp, err := h.client.Do(request); err == nil {
 		defer resp.Body.Close()
 		switch resp.StatusCode / 100 {
 		case HTTP_2xx:
diff --git a/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client_test.go b/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client_test.go
deleted file mode 100644
index d581d94e..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pkg
-
-import (
-	"fmt"
-	"io"
-	"math"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-	"time"
-)
-
-func TestExpBackoff(t *testing.T) {
-	duration := time.Millisecond
-	max := time.Hour
-	for i := 0; i < math.MaxUint16; i++ {
-		duration = ExpBackoff(duration, max)
-		if duration < 0 {
-			t.Fatalf("duration too small: %v %v", duration, i)
-		}
-		if duration > max {
-			t.Fatalf("duration too large: %v %v", duration, i)
-		}
-	}
-}
-
-// Test exponential backoff and that it continues retrying if a 5xx response is
-// received
-func TestGetURLExpBackOff(t *testing.T) {
-	var expBackoffTests = []struct {
-		count int
-		body  string
-	}{
-		{0, "number of attempts: 0"},
-		{1, "number of attempts: 1"},
-		{2, "number of attempts: 2"},
-	}
-	client := NewHttpClient()
-
-	for i, tt := range expBackoffTests {
-		mux := http.NewServeMux()
-		count := 0
-		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-			if count == tt.count {
-				io.WriteString(w, fmt.Sprintf("number of attempts: %d", count))
-				return
-			}
-			count++
-			http.Error(w, "", 500)
-		})
-		ts := httptest.NewServer(mux)
-		defer ts.Close()
-
-		data, err := client.GetRetry(ts.URL)
-		if err != nil {
-			t.Errorf("Test case %d produced error: %v", i, err)
-		}
-
-		if count != tt.count {
-			t.Errorf("Test case %d failed: %d != %d", i, count, tt.count)
-		}
-
-		if string(data) != tt.body {
-			t.Errorf("Test case %d failed: %s != %s", i, tt.body, data)
-		}
-	}
-}
-
-// Test that it stops retrying if a 4xx response comes back
-func TestGetURL4xx(t *testing.T) {
-	client := NewHttpClient()
-	retries := 0
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		retries++
-		http.Error(w, "", 404)
-	}))
-	defer ts.Close()
-
-	_, err := client.GetRetry(ts.URL)
-	if err == nil {
-		t.Errorf("Incorrect result\ngot: %s\nwant: %s", err.Error(), "Not found. HTTP status code: 404")
-	}
-
-	if retries > 1 {
-		t.Errorf("Number of retries:\n%d\nExpected number of retries:\n%d", retries, 1)
-	}
-}
-
-// Test that it fetches and returns user-data just fine
-func TestGetURL2xx(t *testing.T) {
-	var cloudcfg = `
-#cloud-config
-coreos:
-  oem:
-    id: test
-    name: CoreOS.box for Test
-    version-id: %VERSION_ID%+%BUILD_ID%
-    home-url: https://github.com/coreos/coreos-cloudinit
-    bug-report-url: https://github.com/coreos/coreos-cloudinit
-  update:
-    reboot-strategy: best-effort
-`
-
-	client := NewHttpClient()
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		fmt.Fprint(w, cloudcfg)
-	}))
-	defer ts.Close()
-
-	data, err := client.GetRetry(ts.URL)
-	if err != nil {
-		t.Errorf("Incorrect result\ngot: %v\nwant: %v", err, nil)
-	}
-
-	if string(data) != cloudcfg {
-		t.Errorf("Incorrect result\ngot: %s\nwant: %s", string(data), cloudcfg)
-	}
-}
-
-// Test attempt to fetching using malformed URL
-func TestGetMalformedURL(t *testing.T) {
-	client := NewHttpClient()
-
-	var tests = []struct {
-		url  string
-		want string
-	}{
-		{"boo", "URL boo does not have a valid HTTP scheme. Skipping."},
-		{"mailto://boo", "URL mailto://boo does not have a valid HTTP scheme. Skipping."},
-		{"ftp://boo", "URL ftp://boo does not have a valid HTTP scheme. Skipping."},
-		{"", "URL is empty. Skipping."},
-	}
-
-	for _, test := range tests {
-		_, err := client.GetRetry(test.url)
-		if err == nil || err.Error() != test.want {
-			t.Errorf("Incorrect result\ngot: %v\nwant: %v", err, test.want)
-		}
-	}
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/env_file.go b/vendor/github.com/coreos/coreos-cloudinit/system/env_file.go
index 6b2c926d..4c6e08b1 100644
--- a/vendor/github.com/coreos/coreos-cloudinit/system/env_file.go
+++ b/vendor/github.com/coreos/coreos-cloudinit/system/env_file.go
@@ -106,7 +106,7 @@ func WriteEnvFile(ef *EnvFile, root string) error {
 
 // keys returns the keys of a map in sorted order
 func keys(m map[string]string) (s []string) {
-	for k, _ := range m {
+	for k := range m {
 		s = append(s, k)
 	}
 	sort.Strings(s)
diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/env_file_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/env_file_test.go
deleted file mode 100644
index 5ec03640..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/system/env_file_test.go
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package system
-
-import (
-	"io/ioutil"
-	"os"
-	"path"
-	"strings"
-	"syscall"
-	"testing"
-
-	"github.com/coreos/coreos-cloudinit/config"
-)
-
-const (
-	base          = "# a file\nFOO=base\n\nBAR= hi there\n"
-	baseNoNewline = "# a file\nFOO=base\n\nBAR= hi there"
-	baseDos       = "# a file\r\nFOO=base\r\n\r\nBAR= hi there\r\n"
-	expectUpdate  = "# a file\nFOO=test\n\nBAR= hi there\nNEW=a value\n"
-	expectCreate  = "FOO=test\nNEW=a value\n"
-)
-
-var (
-	valueUpdate = map[string]string{
-		"FOO": "test",
-		"NEW": "a value",
-	}
-	valueNoop = map[string]string{
-		"FOO": "base",
-	}
-	valueEmpty   = map[string]string{}
-	valueInvalid = map[string]string{
-		"FOO-X": "test",
-	}
-)
-
-func TestWriteEnvFileUpdate(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-	ioutil.WriteFile(fullPath, []byte(base), 0644)
-
-	oldStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueUpdate,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to read expected file: %v", err)
-	}
-
-	if string(contents) != expectUpdate {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	}
-
-	newStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
-		t.Fatalf("File was not replaced: %s", fullPath)
-	}
-}
-
-func TestWriteEnvFileUpdateNoNewline(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-	ioutil.WriteFile(fullPath, []byte(baseNoNewline), 0644)
-
-	oldStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueUpdate,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to read expected file: %v", err)
-	}
-
-	if string(contents) != expectUpdate {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	}
-
-	newStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
-		t.Fatalf("File was not replaced: %s", fullPath)
-	}
-}
-
-func TestWriteEnvFileCreate(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueUpdate,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to read expected file: %v", err)
-	}
-
-	if string(contents) != expectCreate {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	}
-}
-
-func TestWriteEnvFileNoop(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-	ioutil.WriteFile(fullPath, []byte(base), 0644)
-
-	oldStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueNoop,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to read expected file: %v", err)
-	}
-
-	if string(contents) != base {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	}
-
-	newStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	if oldStat.Sys().(*syscall.Stat_t).Ino != newStat.Sys().(*syscall.Stat_t).Ino {
-		t.Fatalf("File was replaced: %s", fullPath)
-	}
-}
-
-func TestWriteEnvFileUpdateDos(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-	ioutil.WriteFile(fullPath, []byte(baseDos), 0644)
-
-	oldStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueUpdate,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to read expected file: %v", err)
-	}
-
-	if string(contents) != expectUpdate {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	}
-
-	newStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
-		t.Fatalf("File was not replaced: %s", fullPath)
-	}
-}
-
-// A middle ground noop, values are unchanged but we did have a value.
-// Seems reasonable to rewrite the file in Unix format anyway.
-func TestWriteEnvFileDos2Unix(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-	ioutil.WriteFile(fullPath, []byte(baseDos), 0644)
-
-	oldStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueNoop,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to read expected file: %v", err)
-	}
-
-	if string(contents) != base {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	}
-
-	newStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
-		t.Fatalf("File was not replaced: %s", fullPath)
-	}
-}
-
-// If it really is a noop (structure is empty) don't even do dos2unix
-func TestWriteEnvFileEmpty(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-	ioutil.WriteFile(fullPath, []byte(baseDos), 0644)
-
-	oldStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueEmpty,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to read expected file: %v", err)
-	}
-
-	if string(contents) != baseDos {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	}
-
-	newStat, err := os.Stat(fullPath)
-	if err != nil {
-		t.Fatalf("Unable to stat file: %v", err)
-	}
-
-	if oldStat.Sys().(*syscall.Stat_t).Ino != newStat.Sys().(*syscall.Stat_t).Ino {
-		t.Fatalf("File was replaced: %s", fullPath)
-	}
-}
-
-// no point in creating empty files
-func TestWriteEnvFileEmptyNoCreate(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueEmpty,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err != nil {
-		t.Fatalf("WriteFile failed: %v", err)
-	}
-
-	contents, err := ioutil.ReadFile(fullPath)
-	if err == nil {
-		t.Fatalf("File has incorrect contents: %q", contents)
-	} else if !os.IsNotExist(err) {
-		t.Fatalf("Unexpected error while reading file: %v", err)
-	}
-}
-
-func TestWriteEnvFilePermFailure(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-	fullPath := path.Join(dir, name)
-	ioutil.WriteFile(fullPath, []byte(base), 0000)
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueUpdate,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if !os.IsPermission(err) {
-		t.Fatalf("Not a pemission denied error: %v", err)
-	}
-}
-
-func TestWriteEnvFileNameFailure(t *testing.T) {
-	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-	if err != nil {
-		t.Fatalf("Unable to create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	name := "foo.conf"
-
-	ef := EnvFile{
-		File: &File{config.File{
-			Path: name,
-		}},
-		Vars: valueInvalid,
-	}
-
-	err = WriteEnvFile(&ef, dir)
-	if err == nil || !strings.HasPrefix(err.Error(), "Invalid name") {
-		t.Fatalf("Not an invalid name error: %v", err)
-	}
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/env_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/env_test.go
deleted file mode 100644
index 9d62aed5..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/system/env_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package system
-
-import (
-	"testing"
-)
-
-func TestServiceContents(t *testing.T) {
-	tests := []struct {
-		Config   interface{}
-		Contents string
-	}{
-		{
-			struct{}{},
-			"",
-		},
-		{
-			struct {
-				A string  `env:"A"`
-				B int     `env:"B"`
-				C bool    `env:"C"`
-				D float64 `env:"D"`
-			}{
-				"hi", 1, true, 0.12345,
-			},
-			`[Service]
-Environment="A=hi"
-Environment="B=1"
-Environment="C=true"
-Environment="D=0.12345"
-`,
-		},
-		{
-			struct {
-				A float64 `env:"A"`
-				B float64 `env:"B"`
-				C float64 `env:"C"`
-				D float64 `env:"D"`
-			}{
-				0.000001, 1, 0.9999999, 0.1,
-			},
-			`[Service]
-Environment="A=1e-06"
-Environment="B=1"
-Environment="C=0.9999999"
-Environment="D=0.1"
-`,
-		},
-	}
-
-	for _, tt := range tests {
-		if c := serviceContents(tt.Config); c != tt.Contents {
-			t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Contents, c)
-		}
-	}
-}
diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts_test.go
deleted file mode 100644
index e5efd7c4..00000000
--- a/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package system - -import ( - "fmt" - "os" - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestEtcdHostsFile(t *testing.T) { - hostname, err := os.Hostname() - if err != nil { - panic(err) - } - - for _, tt := range []struct { - config config.EtcHosts - file *File - err error - }{ - { - "invalid", - nil, - fmt.Errorf("Invalid option to manage_etc_hosts"), - }, - { - "localhost", - &File{config.File{ - Content: fmt.Sprintf("127.0.0.1 %s\n", hostname), - Path: "etc/hosts", - RawFilePermissions: "0644", - }}, - nil, - }, - } { - file, err := EtcHosts{tt.config}.File() - if !reflect.DeepEqual(tt.err, err) { - t.Errorf("bad error (%q): want %q, got %q", tt.config, tt.err, err) - } - if !reflect.DeepEqual(tt.file, file) { - t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/etcd_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/etcd_test.go deleted file mode 100644 index 5fc17f02..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/etcd_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestEtcdUnits(t *testing.T) { - for _, tt := range []struct { - config config.Etcd - units []Unit - }{ - { - config.Etcd{}, - []Unit{{config.Unit{ - Name: "etcd.service", - Runtime: true, - DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}}, - }}}, - }, - { - config.Etcd{ - Discovery: "http://disco.example.com/foobar", - PeerBindAddr: "127.0.0.1:7002", - }, - []Unit{{config.Unit{ - Name: "etcd.service", - Runtime: true, - DropIns: []config.UnitDropIn{{ - Name: "20-cloudinit.conf", - Content: `[Service] -Environment="ETCD_DISCOVERY=http://disco.example.com/foobar" -Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002" -`, - }}, - }}}, - }, - { - config.Etcd{ - Name: "node001", - Discovery: "http://disco.example.com/foobar", - PeerBindAddr: "127.0.0.1:7002", - }, - []Unit{{config.Unit{ - Name: "etcd.service", - Runtime: true, - DropIns: []config.UnitDropIn{{ - Name: "20-cloudinit.conf", - Content: `[Service] -Environment="ETCD_DISCOVERY=http://disco.example.com/foobar" -Environment="ETCD_NAME=node001" -Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002" -`, - }}, - }}}, - }, - } { - units := Etcd{tt.config}.Units() - if !reflect.DeepEqual(tt.units, units) { - t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/file.go b/vendor/github.com/coreos/coreos-cloudinit/system/file.go index 50b1466e..57f48e27 100644 --- a/vendor/github.com/coreos/coreos-cloudinit/system/file.go +++ b/vendor/github.com/coreos/coreos-cloudinit/system/file.go @@ -45,17 +45,16 @@ func (f *File) Permissions() (os.FileMode, error) { return os.FileMode(perm), nil } +// WriteFile writes the given unencoded file to the 
filesystem func WriteFile(f *File, root string) (string, error) { + if f.Encoding != "" { + return "", fmt.Errorf("Unable to write file with encoding %s", f.Encoding) + } + fullpath := path.Join(root, f.Path) dir := path.Dir(fullpath) log.Printf("Writing file to %q", fullpath) - content, err := config.DecodeContent(f.Content, f.Encoding) - - if err != nil { - return "", fmt.Errorf("Unable to decode %s (%v)", f.Path, err) - } - if err := EnsureDirectoryExists(dir); err != nil { return "", err } @@ -71,7 +70,7 @@ func WriteFile(f *File, root string) (string, error) { return "", err } - if err := ioutil.WriteFile(tmp.Name(), content, perm); err != nil { + if err := ioutil.WriteFile(tmp.Name(), []byte(f.Content), perm); err != nil { return "", err } diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/file_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/file_test.go deleted file mode 100644 index f68ec2fa..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/file_test.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "io/ioutil" - "os" - "path" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestWriteFileUnencodedContent(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - fn := "foo" - fullPath := path.Join(dir, fn) - - wf := File{config.File{ - Path: fn, - Content: "bar", - RawFilePermissions: "0644", - }} - - path, err := WriteFile(&wf, dir) - if err != nil { - t.Fatalf("Processing of WriteFile failed: %v", err) - } else if path != fullPath { - t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path) - } - - fi, err := os.Stat(fullPath) - if err != nil { - t.Fatalf("Unable to stat file: %v", err) - } - - if fi.Mode() != os.FileMode(0644) { - t.Errorf("File has incorrect mode: %v", fi.Mode()) - } - - contents, err := ioutil.ReadFile(fullPath) - if err != nil { - t.Fatalf("Unable to read expected file: %v", err) - } - - if string(contents) != "bar" { - t.Fatalf("File has incorrect contents") - } -} - -func TestWriteFileInvalidPermission(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - wf := File{config.File{ - Path: path.Join(dir, "tmp", "foo"), - Content: "bar", - RawFilePermissions: "pants", - }} - - if _, err := WriteFile(&wf, dir); err == nil { - t.Fatalf("Expected error to be raised when writing file with invalid permission") - } -} - -func TestDecimalFilePermissions(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - fn := "foo" - fullPath := path.Join(dir, fn) - - wf := File{config.File{ - Path: fn, - RawFilePermissions: "744", - }} - 
- path, err := WriteFile(&wf, dir) - if err != nil { - t.Fatalf("Processing of WriteFile failed: %v", err) - } else if path != fullPath { - t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path) - } - - fi, err := os.Stat(fullPath) - if err != nil { - t.Fatalf("Unable to stat file: %v", err) - } - - if fi.Mode() != os.FileMode(0744) { - t.Errorf("File has incorrect mode: %v", fi.Mode()) - } -} - -func TestWriteFilePermissions(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - fn := "foo" - fullPath := path.Join(dir, fn) - - wf := File{config.File{ - Path: fn, - RawFilePermissions: "0755", - }} - - path, err := WriteFile(&wf, dir) - if err != nil { - t.Fatalf("Processing of WriteFile failed: %v", err) - } else if path != fullPath { - t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path) - } - - fi, err := os.Stat(fullPath) - if err != nil { - t.Fatalf("Unable to stat file: %v", err) - } - - if fi.Mode() != os.FileMode(0755) { - t.Errorf("File has incorrect mode: %v", fi.Mode()) - } -} - -func TestWriteFileEncodedContent(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - //all of these decode to "bar" - content_tests := map[string]string{ - "base64": "YmFy", - "b64": "YmFy", - "gz": "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00", - "gzip": "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00", - "gz+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=", - "gzip+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=", - "gz+b64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=", - "gzip+b64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=", - } - - for encoding, content := range content_tests { - fullPath := path.Join(dir, encoding) - - wf := File{config.File{ - Path: encoding, - Encoding: encoding, - Content: content, - RawFilePermissions: "0644", - }} - - path, err := WriteFile(&wf, dir) - if err != nil { - t.Fatalf("Processing of WriteFile failed: %v", err) - } else if path != fullPath { - t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path) - } - - fi, err := os.Stat(fullPath) - if err != nil { - t.Fatalf("Unable to stat file: %v", err) - } - - if fi.Mode() != os.FileMode(0644) { - t.Errorf("File has incorrect mode: %v", fi.Mode()) - } - - contents, err := ioutil.ReadFile(fullPath) - if err != nil { - t.Fatalf("Unable to read expected file: %v", err) - } - - if string(contents) != "bar" { - t.Fatalf("File has incorrect contents: '%s'", contents) - } - } -} - -func TestWriteFileInvalidEncodedContent(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - content_encodings := []string{ - "base64", - "b64", - "gz", - "gzip", - "gz+base64", - "gzip+base64", - "gz+b64", - "gzip+b64", - } - - for _, encoding := range content_encodings { - wf := File{config.File{ - Path: path.Join(dir, "tmp", "foo"), - Content: "@&*#%invalid data*@&^#*&", - Encoding: encoding, - }} - - if _, err := WriteFile(&wf, dir); err == nil { - t.Fatalf("Expected error to be raised when writing file with encoding") - } - } -} - -func TestWriteFileUnknownEncodedContent(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - 
t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - wf := File{config.File{ - Path: path.Join(dir, "tmp", "foo"), - Content: "", - Encoding: "no-such-encoding", - }} - - if _, err := WriteFile(&wf, dir); err == nil { - t.Fatalf("Expected error to be raised when writing file with encoding") - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/flannel_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/flannel_test.go deleted file mode 100644 index 7bc9f7f6..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/flannel_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestFlannelEnvVars(t *testing.T) { - for _, tt := range []struct { - config config.Flannel - contents string - }{ - { - config.Flannel{}, - "", - }, - { - config.Flannel{ - EtcdEndpoints: "http://12.34.56.78:4001", - EtcdPrefix: "/coreos.com/network/tenant1", - }, - `FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001 -FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`, - }, - } { - out := Flannel{tt.config}.envVars() - if out != tt.contents { - t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.contents, out) - } - } -} - -func TestFlannelFile(t *testing.T) { - for _, tt := range []struct { - config config.Flannel - file *File - }{ - { - config.Flannel{}, - nil, - }, - { - config.Flannel{ - EtcdEndpoints: "http://12.34.56.78:4001", - EtcdPrefix: "/coreos.com/network/tenant1", - }, - &File{config.File{ - Path: "run/flannel/options.env", - RawFilePermissions: "0644", - Content: `FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001 -FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`, - }}, - }, - } { - file, _ := Flannel{tt.config}.File() - if !reflect.DeepEqual(tt.file, file) { - t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/fleet_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/fleet_test.go deleted file mode 100644 index dfe7c3f5..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/fleet_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package system - -import ( - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestFleetUnits(t *testing.T) { - for _, tt := range []struct { - config config.Fleet - units []Unit - }{ - { - config.Fleet{}, - []Unit{{config.Unit{ - Name: "fleet.service", - Runtime: true, - DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}}, - }}}, - }, - { - config.Fleet{ - PublicIP: "12.34.56.78", - }, - []Unit{{config.Unit{ - Name: "fleet.service", - Runtime: true, - DropIns: []config.UnitDropIn{{ - Name: "20-cloudinit.conf", - Content: `[Service] -Environment="FLEET_PUBLIC_IP=12.34.56.78" -`, - }}, - }}}, - }, - } { - units := Fleet{tt.config}.Units() - if !reflect.DeepEqual(units, tt.units) { - t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/locksmith_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/locksmith_test.go deleted file mode 100644 index 6d7d9887..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/locksmith_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestLocksmithUnits(t *testing.T) { - for _, tt := range []struct { - config config.Locksmith - units []Unit - }{ - { - config.Locksmith{}, - []Unit{{config.Unit{ - Name: "locksmithd.service", - Runtime: true, - DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}}, - }}}, - }, - { - config.Locksmith{ - Endpoint: "12.34.56.78:4001", - }, - []Unit{{config.Unit{ - Name: "locksmithd.service", - Runtime: true, - DropIns: []config.UnitDropIn{{ - Name: "20-cloudinit.conf", - Content: `[Service] -Environment="LOCKSMITHD_ENDPOINT=12.34.56.78:4001" -`, - }}, - }}}, - }, - } { - units := Locksmith{tt.config}.Units() - if !reflect.DeepEqual(units, tt.units) { - t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go b/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go deleted file mode 100644 index 025b09cb..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package system - -import ( - "log" - "net" - "os/exec" - "strings" - - "github.com/coreos/coreos-cloudinit/config" - "github.com/coreos/coreos-cloudinit/network" - - "github.com/docker/libcontainer/netlink" -) - -func RestartNetwork(interfaces []network.InterfaceGenerator) (err error) { - defer func() { - if e := restartNetworkd(); e != nil { - err = e - } - }() - - if err = downNetworkInterfaces(interfaces); err != nil { - return - } - - if err = maybeProbe8012q(interfaces); err != nil { - return - } - return maybeProbeBonding(interfaces) -} - -func downNetworkInterfaces(interfaces []network.InterfaceGenerator) error { - sysInterfaceMap := make(map[string]*net.Interface) - if systemInterfaces, err := net.Interfaces(); err == nil { - for _, iface := range systemInterfaces { - iface := iface - sysInterfaceMap[iface.Name] = &iface - } - } else { - return err - } - - for _, iface := range interfaces { - if systemInterface, ok := sysInterfaceMap[iface.Name()]; ok { - log.Printf("Taking down interface %q\n", systemInterface.Name) - if err := netlink.NetworkLinkDown(systemInterface); err != nil { - log.Printf("Error while downing interface %q (%s). Continuing...\n", systemInterface.Name, err) - } - } - } - - return nil -} - -func maybeProbe8012q(interfaces []network.InterfaceGenerator) error { - for _, iface := range interfaces { - if iface.Type() == "vlan" { - log.Printf("Probing LKM %q (%q)\n", "8021q", "8021q") - return exec.Command("modprobe", "8021q").Run() - } - } - return nil -} - -func maybeProbeBonding(interfaces []network.InterfaceGenerator) error { - for _, iface := range interfaces { - if iface.Type() == "bond" { - args := append([]string{"bonding"}, strings.Split(iface.ModprobeParams(), " ")...) - log.Printf("Probing LKM %q (%q)\n", "bonding", args) - return exec.Command("modprobe", args...).Run() - } - } - return nil -} - -func restartNetworkd() error { - log.Printf("Restarting networkd.service\n") - networkd := Unit{config.Unit{Name: "systemd-networkd.service"}} - _, err := NewUnitManager("").RunUnitCommand(networkd, "restart") - return err -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/oem_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/oem_test.go deleted file mode 100644 index 38661201..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/oem_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package system - -import ( - "reflect" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestOEMFile(t *testing.T) { - for _, tt := range []struct { - config config.OEM - file *File - }{ - { - config.OEM{}, - nil, - }, - { - config.OEM{ - ID: "rackspace", - Name: "Rackspace Cloud Servers", - VersionID: "168.0.0", - HomeURL: "https://www.rackspace.com/cloud/servers/", - BugReportURL: "https://github.com/coreos/coreos-overlay", - }, - &File{config.File{ - Path: "etc/oem-release", - RawFilePermissions: "0644", - Content: `ID=rackspace -VERSION_ID=168.0.0 -NAME="Rackspace Cloud Servers" -HOME_URL="https://www.rackspace.com/cloud/servers/" -BUG_REPORT_URL="https://github.com/coreos/coreos-overlay" -`, - }}, - }, - } { - file, err := OEM{tt.config}.File() - if err != nil { - t.Errorf("bad error (%q): want %v, got %q", tt.config, nil, err) - } - if !reflect.DeepEqual(tt.file, file) { - t.Errorf("bad file (%q): want %#v, got %#v", tt.config, tt.file, file) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go b/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go deleted file mode 100644 index 190b9e77..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path" - "strings" - - "github.com/coreos/go-systemd/dbus" - "github.com/coreos/coreos-cloudinit/config" -) - -func NewUnitManager(root string) UnitManager { - return &systemd{root} -} - -type systemd struct { - root string -} - -// fakeMachineID is placed on non-usr CoreOS images and should -// never be used as a true MachineID -const fakeMachineID = "42000000000000000000000000000042" - -// PlaceUnit writes a unit file at its desired destination, creating parent -// directories as necessary. -func (s *systemd) PlaceUnit(u Unit) error { - file := File{config.File{ - Path: u.Destination(s.root), - Content: u.Content, - RawFilePermissions: "0644", - }} - - _, err := WriteFile(&file, "/") - return err -} - -// PlaceUnitDropIn writes a unit drop-in file at its desired destination, -// creating parent directories as necessary. 
-func (s *systemd) PlaceUnitDropIn(u Unit, d config.UnitDropIn) error { - file := File{config.File{ - Path: u.DropInDestination(s.root, d), - Content: d.Content, - RawFilePermissions: "0644", - }} - - _, err := WriteFile(&file, "/") - return err -} - -func (s *systemd) EnableUnitFile(u Unit) error { - conn, err := dbus.New() - if err != nil { - return err - } - - units := []string{u.Name} - _, _, err = conn.EnableUnitFiles(units, u.Runtime, true) - return err -} - -func (s *systemd) RunUnitCommand(u Unit, c string) (string, error) { - conn, err := dbus.New() - if err != nil { - return "", err - } - - var fn func(string, string) (string, error) - switch c { - case "start": - fn = conn.StartUnit - case "stop": - fn = conn.StopUnit - case "restart": - fn = conn.RestartUnit - case "reload": - fn = conn.ReloadUnit - case "try-restart": - fn = conn.TryRestartUnit - case "reload-or-restart": - fn = conn.ReloadOrRestartUnit - case "reload-or-try-restart": - fn = conn.ReloadOrTryRestartUnit - default: - return "", fmt.Errorf("Unsupported systemd command %q", c) - } - - return fn(u.Name, "replace") -} - -func (s *systemd) DaemonReload() error { - conn, err := dbus.New() - if err != nil { - return err - } - - return conn.Reload() -} - -// MaskUnit masks the given Unit by symlinking its unit file to -// /dev/null, analogous to `systemctl mask`. -// N.B.: Unlike `systemctl mask`, this function will *remove any existing unit -// file at the location*, to ensure that the mask will succeed. -func (s *systemd) MaskUnit(u Unit) error { - masked := u.Destination(s.root) - if _, err := os.Stat(masked); os.IsNotExist(err) { - if err := os.MkdirAll(path.Dir(masked), os.FileMode(0755)); err != nil { - return err - } - } else if err := os.Remove(masked); err != nil { - return err - } - return os.Symlink("/dev/null", masked) -} - -// UnmaskUnit is analogous to systemd's unit_file_unmask. If the file -// associated with the given Unit is empty or appears to be a symlink to -// /dev/null, it is removed. -func (s *systemd) UnmaskUnit(u Unit) error { - masked := u.Destination(s.root) - ne, err := nullOrEmpty(masked) - if os.IsNotExist(err) { - return nil - } else if err != nil { - return err - } - if !ne { - log.Printf("%s is not null or empty, refusing to unmask", masked) - return nil - } - return os.Remove(masked) -} - -// nullOrEmpty checks whether a given path appears to be an empty regular file -// or a symlink to /dev/null -func nullOrEmpty(path string) (bool, error) { - fi, err := os.Stat(path) - if err != nil { - return false, err - } - m := fi.Mode() - if m.IsRegular() && fi.Size() <= 0 { - return true, nil - } - if m&os.ModeCharDevice > 0 { - return true, nil - } - return false, nil -} - -func ExecuteScript(scriptPath string) (string, error) { - props := []dbus.Property{ - dbus.PropDescription("Unit generated and executed by coreos-cloudinit on behalf of user"), - dbus.PropExecStart([]string{"/bin/bash", scriptPath}, false), - } - - base := path.Base(scriptPath) - name := fmt.Sprintf("coreos-cloudinit-%s.service", base) - - log.Printf("Creating transient systemd unit '%s'", name) - - conn, err := dbus.New() - if err != nil { - return "", err - } - - _, err = conn.StartTransientUnit(name, "replace", props...) 
- return name, err -} - -func SetHostname(hostname string) error { - return exec.Command("hostnamectl", "set-hostname", hostname).Run() -} - -func Hostname() (string, error) { - return os.Hostname() -} - -func MachineID(root string) string { - contents, _ := ioutil.ReadFile(path.Join(root, "etc", "machine-id")) - id := strings.TrimSpace(string(contents)) - - if id == fakeMachineID { - id = "" - } - - return id -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/systemd_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/systemd_test.go deleted file mode 100644 index 82052abc..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/systemd_test.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestPlaceUnit(t *testing.T) { - tests := []config.Unit{ - { - Name: "50-eth0.network", - Runtime: true, - Content: "[Match]\nName=eth47\n\n[Network]\nAddress=10.209.171.177/19\n", - }, - { - Name: "media-state.mount", - Content: "[Mount]\nWhat=/dev/sdb1\nWhere=/media/state\n", - }, - } - - for _, tt := range tests { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - panic(fmt.Sprintf("Unable to create tempdir: %v", err)) - } - - u := Unit{tt} - sd := &systemd{dir} - - if err := sd.PlaceUnit(u); err != nil { - t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err) - } - - fi, err := os.Stat(u.Destination(dir)) - if err != nil { - t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err) - } - - if mode := fi.Mode(); mode != os.FileMode(0644) { - t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode) - } - - c, err := ioutil.ReadFile(u.Destination(dir)) - if err != nil { - t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err) - } - - if string(c) != tt.Content { - t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Content, string(c)) - } - - os.RemoveAll(dir) - } -} - -func TestPlaceUnitDropIn(t *testing.T) { - tests := []config.Unit{ - { - Name: "false.service", - Runtime: true, - DropIns: []config.UnitDropIn{ - { - Name: "00-true.conf", - Content: "[Service]\nExecStart=\nExecStart=/usr/bin/true\n", - }, - }, - }, - { - Name: "true.service", - DropIns: []config.UnitDropIn{ - { - Name: "00-false.conf", - Content: "[Service]\nExecStart=\nExecStart=/usr/bin/false\n", - }, - }, - }, - } - - for _, tt := range tests { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - panic(fmt.Sprintf("Unable to create tempdir: %v", err)) - } - - u := Unit{tt} - sd := &systemd{dir} - - if err := sd.PlaceUnitDropIn(u, u.DropIns[0]); err != nil { - t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err) - } - - fi, err := os.Stat(u.DropInDestination(dir, u.DropIns[0])) - if err != nil { - t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err) - } - 
- if mode := fi.Mode(); mode != os.FileMode(0644) { - t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode) - } - - c, err := ioutil.ReadFile(u.DropInDestination(dir, u.DropIns[0])) - if err != nil { - t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err) - } - - if string(c) != u.DropIns[0].Content { - t.Errorf("bad contents (%+v): want %q, got %q", tt, u.DropIns[0].Content, string(c)) - } - - os.RemoveAll(dir) - } -} - -func TestMachineID(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - os.Mkdir(path.Join(dir, "etc"), os.FileMode(0755)) - ioutil.WriteFile(path.Join(dir, "etc", "machine-id"), []byte("node007\n"), os.FileMode(0444)) - - if MachineID(dir) != "node007" { - t.Fatalf("File has incorrect contents") - } -} - -func TestMaskUnit(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - sd := &systemd{dir} - - // Ensure mask works with units that do not currently exist - uf := Unit{config.Unit{Name: "foo.service"}} - if err := sd.MaskUnit(uf); err != nil { - t.Fatalf("Unable to mask new unit: %v", err) - } - fooPath := path.Join(dir, "etc", "systemd", "system", "foo.service") - fooTgt, err := os.Readlink(fooPath) - if err != nil { - t.Fatal("Unable to read link", err) - } - if fooTgt != "/dev/null" { - t.Fatal("unit not masked, got unit target", fooTgt) - } - - // Ensure mask works with unit files that already exist - ub := Unit{config.Unit{Name: "bar.service"}} - barPath := path.Join(dir, "etc", "systemd", "system", "bar.service") - if _, err := os.Create(barPath); err != nil { - t.Fatalf("Error creating new unit file: %v", err) - } - if err := sd.MaskUnit(ub); err != nil { - t.Fatalf("Unable to mask existing unit: %v", err) - } - barTgt, err := os.Readlink(barPath) - if err != nil { - t.Fatal("Unable to read link", err) - } - if barTgt != "/dev/null" { - t.Fatal("unit not masked, got unit target", barTgt) - } -} - -func TestUnmaskUnit(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - sd := &systemd{dir} - - nilUnit := Unit{config.Unit{Name: "null.service"}} - if err := sd.UnmaskUnit(nilUnit); err != nil { - t.Errorf("unexpected error from unmasking nonexistent unit: %v", err) - } - - uf := Unit{config.Unit{Name: "foo.service", Content: "[Service]\nExecStart=/bin/true"}} - dst := uf.Destination(dir) - if err := os.MkdirAll(path.Dir(dst), os.FileMode(0755)); err != nil { - t.Fatalf("Unable to create unit directory: %v", err) - } - if _, err := os.Create(dst); err != nil { - t.Fatalf("Unable to write unit file: %v", err) - } - - if err := ioutil.WriteFile(dst, []byte(uf.Content), 700); err != nil { - t.Fatalf("Unable to write unit file: %v", err) - } - if err := sd.UnmaskUnit(uf); err != nil { - t.Errorf("unmask of non-empty unit returned unexpected error: %v", err) - } - got, _ := ioutil.ReadFile(dst) - if string(got) != uf.Content { - t.Errorf("unmask of non-empty unit mutated unit contents unexpectedly") - } - - ub := Unit{config.Unit{Name: "bar.service"}} - dst = ub.Destination(dir) - if err := os.Symlink("/dev/null", dst); err != nil { - t.Fatalf("Unable to create masked unit: %v", err) - } - if err := sd.UnmaskUnit(ub); err != nil { - t.Errorf("unmask of unit 
returned unexpected error: %v", err) - } - if _, err := os.Stat(dst); !os.IsNotExist(err) { - t.Errorf("expected %s to not exist after unmask, but got err: %s", dst, err) - } -} - -func TestNullOrEmpty(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-") - if err != nil { - t.Fatalf("Unable to create tempdir: %v", err) - } - defer os.RemoveAll(dir) - - non := path.Join(dir, "does_not_exist") - ne, err := nullOrEmpty(non) - if !os.IsNotExist(err) { - t.Errorf("nullOrEmpty on nonexistent file returned bad error: %v", err) - } - if ne { - t.Errorf("nullOrEmpty returned true unxpectedly") - } - - regEmpty := path.Join(dir, "regular_empty_file") - _, err = os.Create(regEmpty) - if err != nil { - t.Fatalf("Unable to create tempfile: %v", err) - } - gotNe, gotErr := nullOrEmpty(regEmpty) - if !gotNe || gotErr != nil { - t.Errorf("nullOrEmpty of regular empty file returned %t, %v - want true, nil", gotNe, gotErr) - } - - reg := path.Join(dir, "regular_file") - if err := ioutil.WriteFile(reg, []byte("asdf"), 700); err != nil { - t.Fatalf("Unable to create tempfile: %v", err) - } - gotNe, gotErr = nullOrEmpty(reg) - if gotNe || gotErr != nil { - t.Errorf("nullOrEmpty of regular file returned %t, %v - want false, nil", gotNe, gotErr) - } - - null := path.Join(dir, "null") - if err := os.Symlink(os.DevNull, null); err != nil { - t.Fatalf("Unable to create /dev/null link: %s", err) - } - gotNe, gotErr = nullOrEmpty(null) - if !gotNe || gotErr != nil { - t.Errorf("nullOrEmpty of null symlink returned %t, %v - want true, nil", gotNe, gotErr) - } - -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/unit_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/unit_test.go deleted file mode 100644 index c995b592..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/unit_test.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package system - -import ( - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func TestType(t *testing.T) { - tests := []struct { - name string - - typ string - }{ - {}, - {"test.service", "service"}, - {"hello", ""}, - {"lots.of.dots", "dots"}, - } - - for _, tt := range tests { - u := Unit{config.Unit{ - Name: tt.name, - }} - if typ := u.Type(); tt.typ != typ { - t.Errorf("bad type (%+v): want %q, got %q", tt, tt.typ, typ) - } - } -} - -func TestGroup(t *testing.T) { - tests := []struct { - name string - - group string - }{ - {"test.service", "system"}, - {"test.link", "network"}, - {"test.network", "network"}, - {"test.netdev", "network"}, - {"test.conf", "system"}, - } - - for _, tt := range tests { - u := Unit{config.Unit{ - Name: tt.name, - }} - if group := u.Group(); tt.group != group { - t.Errorf("bad group (%+v): want %q, got %q", tt, tt.group, group) - } - } -} - -func TestDestination(t *testing.T) { - tests := []struct { - root string - name string - runtime bool - - destination string - }{ - { - root: "/some/dir", - name: "foobar.service", - destination: "/some/dir/etc/systemd/system/foobar.service", - }, - { - root: "/some/dir", - name: "foobar.service", - runtime: true, - destination: "/some/dir/run/systemd/system/foobar.service", - }, - } - - for _, tt := range tests { - u := Unit{config.Unit{ - Name: tt.name, - Runtime: tt.runtime, - }} - if d := u.Destination(tt.root); tt.destination != d { - t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d) - } - } -} - -func TestDropInDestination(t *testing.T) { - tests := []struct { - root string - unitName string - dropInName string - runtime bool - - destination string - }{ - { - root: "/some/dir", - unitName: "foo.service", - dropInName: "bar.conf", - destination: "/some/dir/etc/systemd/system/foo.service.d/bar.conf", - }, - { - root: "/some/dir", - unitName: "foo.service", - dropInName: "bar.conf", - runtime: true, - destination: "/some/dir/run/systemd/system/foo.service.d/bar.conf", - }, - } - - for _, tt := range tests { - u := Unit{config.Unit{ - Name: tt.unitName, - Runtime: tt.runtime, - DropIns: []config.UnitDropIn{{ - Name: tt.dropInName, - }}, - }} - if d := u.DropInDestination(tt.root, u.DropIns[0]); tt.destination != d { - t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d) - } - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/update_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/update_test.go deleted file mode 100644 index ae3972b8..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/update_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package system - -import ( - "io" - "reflect" - "strings" - "testing" - - "github.com/coreos/coreos-cloudinit/config" -) - -func testReadConfig(config string) func() (io.Reader, error) { - return func() (io.Reader, error) { - return strings.NewReader(config), nil - } -} - -func TestUpdateUnits(t *testing.T) { - for _, tt := range []struct { - config config.Update - units []Unit - err error - }{ - { - config: config.Update{}, - }, - { - config: config.Update{Group: "master", Server: "http://foo.com"}, - units: []Unit{{config.Unit{ - Name: "update-engine.service", - Command: "restart", - }}}, - }, - { - config: config.Update{RebootStrategy: "best-effort"}, - units: []Unit{{config.Unit{ - Name: "locksmithd.service", - Command: "restart", - Runtime: true, - }}}, - }, - { - config: config.Update{RebootStrategy: "etcd-lock"}, - units: []Unit{{config.Unit{ - Name: "locksmithd.service", - Command: "restart", - Runtime: true, - }}}, - }, - { - config: config.Update{RebootStrategy: "reboot"}, - units: []Unit{{config.Unit{ - Name: "locksmithd.service", - Command: "restart", - Runtime: true, - }}}, - }, - { - config: config.Update{RebootStrategy: "off"}, - units: []Unit{{config.Unit{ - Name: "locksmithd.service", - Command: "stop", - Runtime: true, - Mask: true, - }}}, - }, - } { - units := Update{Update: tt.config, ReadConfig: testReadConfig("")}.Units() - if !reflect.DeepEqual(tt.units, units) { - t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.units, units) - } - } -} - -func TestUpdateFile(t *testing.T) { - for _, tt := range []struct { - config config.Update - orig string - file *File - err error - }{ - { - config: config.Update{}, - }, - { - config: config.Update{RebootStrategy: "wizzlewazzle"}, - err: &config.ErrorValid{Value: "wizzlewazzle", Field: "RebootStrategy", Valid: "^(best-effort|etcd-lock|reboot|off)$"}, - }, - { - config: config.Update{Group: "master", Server: "http://foo.com"}, - file: &File{config.File{ - Content: "GROUP=master\nSERVER=http://foo.com\n", - Path: "etc/coreos/update.conf", - RawFilePermissions: "0644", - }}, - }, - { - config: config.Update{RebootStrategy: "best-effort"}, - file: &File{config.File{ - Content: "REBOOT_STRATEGY=best-effort\n", - Path: "etc/coreos/update.conf", - RawFilePermissions: "0644", - }}, - }, - { - config: config.Update{RebootStrategy: "etcd-lock"}, - file: &File{config.File{ - Content: "REBOOT_STRATEGY=etcd-lock\n", - Path: "etc/coreos/update.conf", - RawFilePermissions: "0644", - }}, - }, - { - config: config.Update{RebootStrategy: "reboot"}, - file: &File{config.File{ - Content: "REBOOT_STRATEGY=reboot\n", - Path: "etc/coreos/update.conf", - RawFilePermissions: "0644", - }}, - }, - { - config: config.Update{RebootStrategy: "off"}, - file: &File{config.File{ - Content: "REBOOT_STRATEGY=off\n", - Path: "etc/coreos/update.conf", - RawFilePermissions: "0644", - }}, - }, - { - config: config.Update{RebootStrategy: "etcd-lock"}, - orig: "SERVER=https://example.com\nGROUP=thegroupc\nREBOOT_STRATEGY=awesome", - file: &File{config.File{ - Content: "SERVER=https://example.com\nGROUP=thegroupc\nREBOOT_STRATEGY=etcd-lock\n", - Path: "etc/coreos/update.conf", - RawFilePermissions: "0644", - }}, - }, - } { - file, err := Update{Update: tt.config, ReadConfig: testReadConfig(tt.orig)}.File() - if !reflect.DeepEqual(tt.err, err) { - t.Errorf("bad error (%q): want %q, got %q", tt.config, tt.err, err) - } - if !reflect.DeepEqual(tt.file, file) { - t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file) - } - } -} diff --git 
a/vendor/github.com/coreos/coreos-cloudinit/test b/vendor/github.com/coreos/coreos-cloudinit/test index 645e967a..be9454ab 100755 --- a/vendor/github.com/coreos/coreos-cloudinit/test +++ b/vendor/github.com/coreos/coreos-cloudinit/test @@ -2,42 +2,26 @@ source ./build -SRC=" - config - config/validate - datasource - datasource/configdrive - datasource/file - datasource/metadata - datasource/metadata/cloudsigma - datasource/metadata/digitalocean - datasource/metadata/ec2 - datasource/proc_cmdline - datasource/test - datasource/url - datasource/vmware - datasource/waagent - initialize - network - pkg - system - . -" +SRC=$(find . -name '*.go' \ + -not -path "./vendor/*") + +PKG=$(cd gopath/src/${REPO_PATH}; go list ./... | \ + grep --invert-match vendor) echo "Checking gofix..." go tool fix -diff $SRC echo "Checking gofmt..." -gofmt -d -e $SRC - -# split SRC into an array and prepend REPO_PATH to each local package for go vet -split_vet=(${SRC// / }) -VET_TEST="${REPO_PATH} ${split_vet[@]/#/${REPO_PATH}/}" +res=$(gofmt -d -e -s $SRC) +echo "${res}" +if [ -n "${res}" ]; then + exit 1 +fi echo "Checking govet..." -go vet $VET_TEST +go vet $PKG echo "Running tests..." -go test -timeout 60s -cover $@ ${VET_TEST} --race +go test -timeout 60s -cover $@ ${PKG} --race echo "Success" diff --git a/vendor/github.com/coreos/coreos-cloudinit/vendor.manifest b/vendor/github.com/coreos/coreos-cloudinit/vendor.manifest new file mode 100644 index 00000000..c7e212b8 --- /dev/null +++ b/vendor/github.com/coreos/coreos-cloudinit/vendor.manifest @@ -0,0 +1,11 @@ +# If you manipulate the contents of vendor/, amend this accordingly. +# pkg version +github.com/cloudsigma/cepgo 1bfc4895bf5c4d3b599f3f6ee142299488c8739b +github.com/coreos/go-systemd/dbus 4fbc5060a317b142e6c7bfbedb65596d5f0ab99b +github.com/coreos/yaml 6b16a5714269b2f70720a45406b1babd947a17ef +github.com/dotcloud/docker/pkg/netlink 55d41c3e21e1593b944c06196ffb2ac57ab7f653 +github.com/guelfey/go.dbus f6a3a2366cc39b8479cadc499d3c735fb10fbdda +github.com/tarm/goserial cdabc8d44e8e84f58f18074ae44337e1f2f375b9 +github.com/sigma/vmw-guestinfo 95dd4126d6e8b4ef1970b3f3fe2e8cdd470d2903 +github.com/sigma/vmw-ovflib 56b4f44581cac03d17d8270158bdfd0942ffe790 +github.com/sigma/bdoor babf2a4017b020d4ce04e8167076186e82645dd1 diff --git a/vendor/github.com/coreos/go-systemd/.travis.yml b/vendor/github.com/coreos/go-systemd/.travis.yml index 8c9f56e4..3c37292e 100644 --- a/vendor/github.com/coreos/go-systemd/.travis.yml +++ b/vendor/github.com/coreos/go-systemd/.travis.yml @@ -1,8 +1,8 @@ language: go -go: 1.2 +go: 1.4 install: - - echo "Skip install" + - go get github.com/godbus/dbus script: - ./test diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md index 0ee09fec..cb87a112 100644 --- a/vendor/github.com/coreos/go-systemd/README.md +++ b/vendor/github.com/coreos/go-systemd/README.md @@ -1,29 +1,31 @@ # go-systemd -Go bindings to systemd. The project has three packages: +[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd) +[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd) -- activation - for writing and using socket activation from Go -- journal - for writing to systemd's logging service, journal -- dbus - for starting/stopping/inspecting running services and units +Go bindings to systemd. 
The project has several packages: -Go docs for the entire project are here: - -http://godoc.org/github.com/coreos/go-systemd +- `activation` - for writing and using socket activation from Go +- `dbus` - for starting/stopping/inspecting running services and units +- `journal` - for writing to systemd's logging service, journald +- `sdjournal` - for reading from journald by wrapping its C API +- `machine1` - for registering machines/containers with systemd +- `unit` - for (de)serialization and comparison of unit files ## Socket Activation -An example HTTP server using socket activation can be quickly setup by -following this README on a Linux machine running systemd: +An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd: https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver ## Journal -Using this package you can submit journal entries directly to systemd's journal taking advantage of features like indexed key/value pairs for each log entry. +Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry. +The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available. ## D-Bus -The D-Bus API lets you start, stop and introspect systemd units. The API docs are here: +The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here: http://godoc.org/github.com/coreos/go-systemd/dbus @@ -42,3 +44,11 @@ Create `/etc/dbus-1/system-local.conf` that looks like this: ``` + +## machined + +The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/). + +## Units + +The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html). diff --git a/vendor/github.com/coreos/go-systemd/activation/files.go b/vendor/github.com/coreos/go-systemd/activation/files.go new file mode 100644 index 00000000..c8e85fcd --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/files.go @@ -0,0 +1,52 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package activation implements primitives for systemd socket activation. 
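[Editor's note: the README above points to a socket-activated HTTP server walkthrough. A minimal sketch of that pattern, using the Listeners helper this patch adds to the activation package; the unit wiring and error handling are illustrative only:]

```go
package main

import (
	"io"
	"log"
	"net/http"

	"github.com/coreos/go-systemd/activation"
)

func main() {
	// Listeners wraps the file descriptors that systemd handed to this
	// process (LISTEN_FDS/LISTEN_PID); passing true scrubs those env vars.
	listeners, err := activation.Listeners(true)
	if err != nil || len(listeners) == 0 {
		log.Fatal("expected at least one socket-activated listener")
	}

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello from systemd socket activation\n")
	})
	log.Fatal(http.Serve(listeners[0], nil))
}
```

A matching .socket unit would declare the ListenStream address, and systemd starts the service on the first incoming connection.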
+package activation + +import ( + "os" + "strconv" + "syscall" +) + +// based on: https://gist.github.com/alberts/4640792 +const ( + listenFdsStart = 3 +) + +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + defer os.Unsetenv("LISTEN_PID") + defer os.Unsetenv("LISTEN_FDS") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + files := make([]*os.File, 0, nfds) + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) + } + + return files +} diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go new file mode 100644 index 00000000..df27c29e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go @@ -0,0 +1,62 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "crypto/tls" + "net" +) + +// Listeners returns a slice containing a net.Listener for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example, if systemd were to return file descriptors +// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} +func Listeners(unsetEnv bool) ([]net.Listener, error) { + files := Files(unsetEnv) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + if pc, err := net.FileListener(f); err == nil { + listeners[i] = pc + } + } + return listeners, nil +} + +// TLSListeners returns a slice containing a net.Listener for each matching TCP socket type +// passed to this process. +// It uses the default Listeners func and forces TCP socket handlers to use TLS based on tlsConfig. +func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) { + listeners, err := Listeners(unsetEnv) + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil && err == nil { + tlsConfig.NextProtos = []string{"http/1.1"} + + for i, l := range listeners { + // Activate TLS only for TCP sockets + if l.Addr().Network() == "tcp" { + listeners[i] = tls.NewListener(l, tlsConfig) + } + } + } + + return listeners, err +} diff --git a/vendor/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/activation/packetconns.go new file mode 100644 index 00000000..48b2ca02 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/packetconns.go @@ -0,0 +1,37 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} +func PacketConns(unsetEnv bool) ([]net.PacketConn, error) { + files := Files(unsetEnv) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + } + } + return conns, nil +} diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go new file mode 100644 index 00000000..b92b1911 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go @@ -0,0 +1,31 @@ +// Code forked from Docker project +package daemon + +import ( + "errors" + "net" + "os" +) + +var SdNotifyNoSocket = errors.New("No socket") + +// SdNotify sends a message to the init daemon. It is common to ignore the error. +func SdNotify(state string) error { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + if socketAddr.Name == "" { + return SdNotifyNoSocket + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + if err != nil { + return err + } + defer conn.Close() + + _, err = conn.Write([]byte(state)) + return err +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go index 88927b7f..5dd748e6 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/dbus.go +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -1,49 +1,77 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Integration with the systemd D-Bus API. 
See http://www.freedesktop.org/wiki/Software/systemd/dbus/ package dbus import ( + "fmt" "os" "strconv" "strings" "sync" - "github.com/guelfey/go.dbus" + "github.com/godbus/dbus" ) -const signalBuffer = 100 +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) -// ObjectPath creates a dbus.ObjectPath using the rules that systemd uses for -// serializing special characters. -func ObjectPath(path string) dbus.ObjectPath { - path = strings.Replace(path, ".", "_2e", -1) - path = strings.Replace(path, "-", "_2d", -1) - path = strings.Replace(path, "@", "_40", -1) - - return dbus.ObjectPath(path) +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) } -// Conn is a connection to systemds dbus endpoint. +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. type Conn struct { - sysconn *dbus.Conn - sysobj *dbus.Object + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + jobListener struct { - jobs map[dbus.ObjectPath]chan string + jobs map[dbus.ObjectPath]chan<- string sync.Mutex } subscriber struct { @@ -53,26 +81,77 @@ type Conn struct { ignore map[dbus.ObjectPath]int64 cleanIgnore int64 } - dispatch map[string]func(dbus.Signal) } -// New() establishes a connection to the system bus and authenticates. +// New establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. func New() (*Conn, error) { - c := new(Conn) + return newConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} - if err := c.initConnection(); err != nil { +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return newConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return newConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. 
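[Editor's note: PathBusEscape above supersedes the old ObjectPath helper, whose test is deleted further down. A quick sketch of the escaping it produces:]

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	// '-' (0x2d) and '.' (0x2e) are outside [a-zA-Z0-9], so each such byte
	// becomes '_' followed by its hex value.
	fmt.Println(dbus.PathBusEscape("foo-bar.service")) // foo_2dbar_2eservice

	// A leading digit is escaped as well ('2' is 0x32), keeping the result
	// a valid bus path component.
	fmt.Println(dbus.PathBusEscape("2foo")) // _32foo
}
```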
+ return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := createBus() + if err != nil { return nil, err } - c.initJobs() + sigconn, err := createBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() return c, nil } -func (c *Conn) initConnection() error { - var err error - c.sysconn, err = dbus.SystemBusPrivate() +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() if err != nil { - return err + return nil, err } // Only use EXTERNAL method, and hardcode the uid (not username) @@ -80,25 +159,29 @@ func (c *Conn) initConnection() error { // libc) methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - err = c.sysconn.Auth(methods) + err = conn.Auth(methods) if err != nil { - c.sysconn.Close() - return err + conn.Close() + return nil, err } - err = c.sysconn.Hello() - if err != nil { - c.sysconn.Close() - return err - } - - c.sysobj = c.sysconn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) - - // Setup the listeners on jobs so that we can get completions - c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - c.initSubscription() - c.initDispatch() - - return nil + return conn, nil +} + +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) } diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus_test.go b/vendor/github.com/coreos/go-systemd/dbus/dbus_test.go deleted file mode 100644 index 2e80f73e..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/dbus_test.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dbus - -import ( - "testing" -) - -// TestObjectPath ensures path encoding of the systemd rules works. 
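// ---- Editor's note (not part of the patch) ----
// The removed test below exercised the old ObjectPath helper. Its
// replacement, PathBusEscape (added above), hex-escapes every byte
// outside [a-zA-Z0-9] plus any leading digit, so by those rules:
//
//	PathBusEscape("foo-bar.service") // "foo_2dbar_2eservice"
//	PathBusEscape("0abc")            // "_30abc" (leading digit escaped)
//	PathBusEscape("")                // "_" (empty-string special case)
// -----------------------------------------------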
-func TestObjectPath(t *testing.T) { - input := "/silly-path/to@a/unit..service" - output := ObjectPath(input) - expected := "/silly_2dpath/to_40a/unit_2e_2eservice" - - if string(output) != expected { - t.Fatalf("Output '%s' did not match expected '%s'", output, expected) - } -} - -// TestNew ensures that New() works without errors. -func TestNew(t *testing.T) { - _, err := New() - - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go index 2897b11e..ab614c7c 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/methods.go +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -1,29 +1,26 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package dbus import ( "errors" - "github.com/guelfey/go.dbus" -) + "path" + "strconv" -func (c *Conn) initJobs() { - c.jobListener.jobs = make(map[dbus.ObjectPath]chan string) -} + "github.com/godbus/dbus" +) func (c *Conn) jobComplete(signal *dbus.Signal) { var id uint32 @@ -40,29 +37,29 @@ func (c *Conn) jobComplete(signal *dbus.Signal) { c.jobListener.Unlock() } -func (c *Conn) startJob(job string, args ...interface{}) (<-chan string, error) { - c.jobListener.Lock() - defer c.jobListener.Unlock() - - ch := make(chan string, 1) - var path dbus.ObjectPath - err := c.sysobj.Call(job, 0, args...).Store(&path) - if err != nil { - return nil, err +func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() } - c.jobListener.jobs[path] = ch - return ch, nil + + var p dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil } -func (c *Conn) runJob(job string, args ...interface{}) (string, error) { - respCh, err := c.startJob(job, args...) - if err != nil { - return "", err - } - return <-respCh, nil -} - -// StartUnit enqeues a start job and depending jobs, if any (unless otherwise +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise // specified by the mode string). // // Takes the unit to activate, plus a mode string. 
The mode needs to be one of @@ -77,50 +74,58 @@ func (c *Conn) runJob(job string, args ...interface{}) (string, error) { // requirement dependencies. It is not recommended to make use of the latter // two options. // -// Result string: one of done, canceled, timeout, failed, dependency, skipped. +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. // done indicates successful execution of a job. canceled indicates that a job // has been canceled before it finished execution. timeout indicates that the // job timeout was reached. failed indicates that the job failed. dependency // indicates that a job this job has been depending on failed and the job hence // has been removed too. skipped indicates that a job was skipped because it // didn't apply to the units current state. -func (c *Conn) StartUnit(name string, mode string) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.StartUnit", name, mode) +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) } // StopUnit is similar to StartUnit but stops the specified unit rather // than starting it. -func (c *Conn) StopUnit(name string, mode string) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.StopUnit", name, mode) +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) } // ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. -func (c *Conn) ReloadUnit(name string, mode string) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) } // RestartUnit restarts a service. If a service is restarted that isn't // running it will be started. -func (c *Conn) RestartUnit(name string, mode string) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) } // TryRestartUnit is like RestartUnit, except that a service that isn't running // is not affected by the restart. -func (c *Conn) TryRestartUnit(name string, mode string) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) } // ReloadOrRestart attempts a reload if the unit supports it and use a restart // otherwise. 
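// ---- Editor's note (not part of the patch) ----
// A hedged sketch of driving the new channel-based signatures above;
// "foo.service" is a hypothetical unit name, and the buffer of 1 mirrors
// what the old runJob allocated, so the JobRemoved dispatcher never
// blocks on the send. Assumes a *Conn from New() and import "log".
ch := make(chan string, 1)
jobID, err := conn.StartUnit("foo.service", "replace", ch)
if err != nil {
	log.Fatal(err)
}
// result is one of: done, canceled, timeout, failed, dependency, skipped
log.Printf("job %d finished: %s", jobID, <-ch)
// -----------------------------------------------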
-func (c *Conn) ReloadOrRestartUnit(name string, mode string) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) } // ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" // flavored restart otherwise. -func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) } // StartTransientUnit() may be used to create and start a transient unit, which @@ -128,8 +133,8 @@ func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) // system is rebooted. name is the unit name including suffix, and must be // unique. mode is the same as in StartUnit(), properties contains properties // of the unit. -func (c *Conn) StartTransientUnit(name string, mode string, properties ...Property) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) } // KillUnit takes the unit name and a UNIX signal number to send. All of the unit's @@ -138,12 +143,17 @@ func (c *Conn) KillUnit(name string, signal int32) { c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() } +// ResetFailedUnit resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnit(name string) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + // getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { var err error var props map[string]dbus.Variant - path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + path := unitPath(unit) if !path.IsValid() { return nil, errors.New("invalid unit name: " + unit) } @@ -171,7 +181,7 @@ func (c *Conn) getProperty(unit string, dbusInterface string, propertyName strin var err error var prop dbus.Variant - path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + path := unitPath(unit) if !path.IsValid() { return nil, errors.New("invalid unit name: " + unit) } @@ -204,11 +214,11 @@ func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]i // to modify. properties are the settings to set, encoded as an array of property // name and value pairs. func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("SetUnitProperties", 0, name, runtime, properties).Store() + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() } func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1." 
+ unitType, propertyName) + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) } // ListUnits returns an array with all currently loaded units. Note that @@ -253,6 +263,48 @@ type UnitStatus struct { JobPath dbus.ObjectPath // The job object path } +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + // EnableUnitFiles() may be used to enable one or more units in the system (by // creating symlinks to them in /etc or /run). // @@ -317,7 +369,7 @@ type EnableUnitFileChange struct { // symlink. func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("DisableUnitFiles", 0, files, runtime).Store(&result) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } @@ -352,3 +404,7 @@ type DisableUnitFileChange struct { func (c *Conn) Reload() error { return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() } + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods_test.go b/vendor/github.com/coreos/go-systemd/dbus/methods_test.go deleted file mode 100644 index 9e2f2232..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/methods_test.go +++ /dev/null @@ -1,314 +0,0 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dbus - -import ( - "fmt" - "github.com/guelfey/go.dbus" - "math/rand" - "os" - "path/filepath" - "reflect" - "testing" -) - -func setupConn(t *testing.T) *Conn { - conn, err := New() - if err != nil { - t.Fatal(err) - } - - return conn -} - -func setupUnit(target string, conn *Conn, t *testing.T) { - // Blindly stop the unit in case it is running - conn.StopUnit(target, "replace") - - // Blindly remove the symlink in case it exists - targetRun := filepath.Join("/run/systemd/system/", target) - err := os.Remove(targetRun) - - // 1. Enable the unit - abs, err := filepath.Abs("../fixtures/" + target) - if err != nil { - t.Fatal(err) - } - - fixture := []string{abs} - - install, changes, err := conn.EnableUnitFiles(fixture, true, true) - if err != nil { - t.Fatal(err) - } - - if install != false { - t.Fatal("Install was true") - } - - if len(changes) < 1 { - t.Fatalf("Expected one change, got %v", changes) - } - - if changes[0].Filename != targetRun { - t.Fatal("Unexpected target filename") - } -} - -// Ensure that basic unit starting and stopping works. -func TestStartStopUnit(t *testing.T) { - target := "start-stop.service" - conn := setupConn(t) - - setupUnit(target, conn, t) - - // 2. Start the unit - job, err := conn.StartUnit(target, "replace") - if err != nil { - t.Fatal(err) - } - - if job != "done" { - t.Fatal("Job is not done, %v", job) - } - - units, err := conn.ListUnits() - - var unit *UnitStatus - for _, u := range units { - if u.Name == target { - unit = &u - } - } - - if unit == nil { - t.Fatalf("Test unit not found in list") - } - - if unit.ActiveState != "active" { - t.Fatalf("Test unit not active") - } - - // 3. Stop the unit - job, err = conn.StopUnit(target, "replace") - if err != nil { - t.Fatal(err) - } - - units, err = conn.ListUnits() - - unit = nil - for _, u := range units { - if u.Name == target { - unit = &u - } - } - - if unit != nil { - t.Fatalf("Test unit found in list, should be stopped") - } -} - -// Enables a unit and then immediately tears it down -func TestEnableDisableUnit(t *testing.T) { - target := "enable-disable.service" - conn := setupConn(t) - - setupUnit(target, conn, t) - - abs, err := filepath.Abs("../fixtures/" + target) - if err != nil { - t.Fatal(err) - } - - path := filepath.Join("/run/systemd/system/", target) - - // 2. Disable the unit - changes, err := conn.DisableUnitFiles([]string{abs}, true) - if err != nil { - t.Fatal(err) - } - - if len(changes) != 1 { - t.Fatalf("Changes should include the path, %v", changes) - } - if changes[0].Filename != path { - t.Fatalf("Change should include correct filename, %+v", changes[0]) - } - if changes[0].Destination != "" { - t.Fatalf("Change destination should be empty, %+v", changes[0]) - } -} - -// TestGetUnitProperties reads the `-.mount` which should exist on all systemd -// systems and ensures that one of its properties is valid. 
-func TestGetUnitProperties(t *testing.T) { - conn := setupConn(t) - - unit := "-.mount" - - info, err := conn.GetUnitProperties(unit) - if err != nil { - t.Fatal(err) - } - - names := info["Wants"].([]string) - - if len(names) < 1 { - t.Fatal("/ is unwanted") - } - - if names[0] != "system.slice" { - t.Fatal("unexpected wants for /") - } - - prop, err := conn.GetUnitProperty(unit, "Wants") - if err != nil { - t.Fatal(err) - } - - if prop.Name != "Wants" { - t.Fatal("unexpected property name") - } - - val := prop.Value.Value().([]string) - if !reflect.DeepEqual(val, names) { - t.Fatal("unexpected property value") - } -} - -// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a -// unit with an invalid name. This test should be run with --test.timeout set, -// as a fail will manifest as GetUnitProperties hanging indefinitely. -func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) { - conn := setupConn(t) - - unit := "//invalid#$^/" - - _, err := conn.GetUnitProperties(unit) - if err == nil { - t.Fatal("Expected an error, got nil") - } - - _, err = conn.GetUnitProperty(unit, "Wants") - if err == nil { - t.Fatal("Expected an error, got nil") - } -} - -// TestSetUnitProperties changes a cgroup setting on the `tmp.mount` -// which should exist on all systemd systems and ensures that the -// property was set. -func TestSetUnitProperties(t *testing.T) { - conn := setupConn(t) - - unit := "tmp.mount" - - if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil { - t.Fatal(err) - } - - info, err := conn.GetUnitTypeProperties(unit, "Mount") - if err != nil { - t.Fatal(err) - } - - value := info["CPUShares"].(uint64) - if value != 1023 { - t.Fatal("CPUShares of unit is not 1023, %s", value) - } -} - -// Ensure that basic transient unit starting and stopping works. -func TestStartStopTransientUnit(t *testing.T) { - conn := setupConn(t) - - props := []Property{ - PropExecStart([]string{"/bin/sleep", "400"}, false), - } - target := fmt.Sprintf("testing-transient-%d.service", rand.Int()) - - // Start the unit - job, err := conn.StartTransientUnit(target, "replace", props...) - if err != nil { - t.Fatal(err) - } - - if job != "done" { - t.Fatal("Job is not done, %v", job) - } - - units, err := conn.ListUnits() - - var unit *UnitStatus - for _, u := range units { - if u.Name == target { - unit = &u - } - } - - if unit == nil { - t.Fatalf("Test unit not found in list") - } - - if unit.ActiveState != "active" { - t.Fatalf("Test unit not active") - } - - // 3. 
Stop the unit - job, err = conn.StopUnit(target, "replace") - if err != nil { - t.Fatal(err) - } - - units, err = conn.ListUnits() - - unit = nil - for _, u := range units { - if u.Name == target { - unit = &u - } - } - - if unit != nil { - t.Fatalf("Test unit found in list, should be stopped") - } -} - -func TestConnJobListener(t *testing.T) { - target := "start-stop.service" - conn := setupConn(t) - - setupUnit(target, conn, t) - - jobSize := len(conn.jobListener.jobs) - - _, err := conn.StartUnit(target, "replace") - if err != nil { - t.Fatal(err) - } - - _, err = conn.StopUnit(target, "replace") - if err != nil { - t.Fatal(err) - } - - currentJobSize := len(conn.jobListener.jobs) - if jobSize != currentJobSize { - t.Fatal("JobListener jobs leaked") - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go index 83dc1ab6..75200115 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/properties.go +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -1,23 +1,21 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package dbus import ( - "github.com/guelfey/go.dbus" + "github.com/godbus/dbus" ) // From the systemd docs: diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go index 88378b29..f92e6fbe 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/set.go +++ b/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -1,3 +1,17 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
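// ---- Editor's note (not part of the patch) ----
// The LinkUnitFiles method added to methods.go earlier in this patch
// decodes the D-Bus reply via dbus.Store into LinkUnitFileChange values
// (an alias of EnableUnitFileChange, whose fields are the three strings
// its doc comment describes). A hypothetical call:
//
//	changes, err := conn.LinkUnitFiles([]string{"/abs/path/foo.service"}, true, false)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, c := range changes {
//		log.Printf("%s: %s -> %s", c.Type, c.Filename, c.Destination)
//	}
// -----------------------------------------------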
+ package dbus type set struct { @@ -17,10 +31,17 @@ func (s *set) Contains(value string) (exists bool) { return } -func (s *set) Length() (int) { +func (s *set) Length() int { return len(s.data) } -func newSet() (*set) { - return &set{make(map[string] bool)} +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} } diff --git a/vendor/github.com/coreos/go-systemd/dbus/set_test.go b/vendor/github.com/coreos/go-systemd/dbus/set_test.go deleted file mode 100644 index d8d174d0..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/set_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package dbus - -import ( - "testing" -) - -// TestBasicSetActions asserts that Add & Remove behavior is correct -func TestBasicSetActions(t *testing.T) { - s := newSet() - - if s.Contains("foo") { - t.Fatal("set should not contain 'foo'") - } - - s.Add("foo") - - if !s.Contains("foo") { - t.Fatal("set should contain 'foo'") - } - - s.Remove("foo") - - if s.Contains("foo") { - t.Fatal("set should not contain 'foo'") - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go index f538fdba..99645144 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription.go +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -1,18 +1,16 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package dbus @@ -20,7 +18,7 @@ import ( "errors" "time" - "github.com/guelfey/go.dbus" + "github.com/godbus/dbus" ) const ( @@ -33,14 +31,13 @@ const ( // systemd will automatically stop sending signals so there is no need to // explicitly call Unsubscribe(). 
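// ---- Editor's note (not part of the patch) ----
// Subscribe below now registers its AddMatch rules on the dedicated
// signal connection (sigconn/sigobj) instead of sysconn, so blocking
// method calls can no longer interleave with signal delivery. A
// hypothetical caller:
//
//	if err := conn.Subscribe(); err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Unsubscribe()
// -----------------------------------------------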
func (c *Conn) Subscribe() error { - c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } @@ -49,45 +46,52 @@ func (c *Conn) Subscribe() error { // Unsubscribe this connection from systemd dbus events. func (c *Conn) Unsubscribe() error { - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } return nil } -func (c *Conn) initSubscription() { - c.subscriber.ignore = make(map[dbus.ObjectPath]int64) -} - -func (c *Conn) initDispatch() { +func (c *Conn) dispatch() { ch := make(chan *dbus.Signal, signalBuffer) - c.sysconn.Signal(ch) + c.sigconn.Signal(ch) go func() { for { - signal := <-ch + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath switch signal.Name { case "org.freedesktop.systemd1.Manager.JobRemoved": - c.jobComplete(signal) - unitName := signal.Body[2].(string) - var unitPath dbus.ObjectPath c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - if unitPath != dbus.ObjectPath("") { - c.sendSubStateUpdate(unitPath) - } case "org.freedesktop.systemd1.Manager.UnitNew": - c.sendSubStateUpdate(signal.Body[1].(dbus.ObjectPath)) + unitPath = signal.Body[1].(dbus.ObjectPath) case "org.freedesktop.DBus.Properties.PropertiesChanged": if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - // we only care about SubState updates, which are a Unit property - c.sendSubStateUpdate(signal.Path) + unitPath = signal.Path } } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) } }() } @@ -101,7 +105,7 @@ func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitSt // SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer // size of the channels, the comparison function for detecting changes and a filter // function for cutting down on the noise that your channel receives. 
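// ---- Editor's note (not part of the patch) ----
// A hedged sketch of consuming the polling API described above; the
// interval and unit name are illustrative, and a nil *UnitStatus in the
// delivered map reports that a previously seen unit has gone away.
// Assumes imports "log" and "time".
statusCh, errCh := conn.SubscribeUnits(time.Second)
for {
	select {
	case changed := <-statusCh:
		if st, ok := changed["foo.service"]; ok && st != nil {
			log.Println("foo.service:", st.ActiveState, st.SubState)
		}
	case err := <-errCh:
		log.Println("subscription error:", err)
	}
}
// -----------------------------------------------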
-func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) { +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { old := make(map[string]*UnitStatus) statusChan := make(chan map[string]*UnitStatus, buffer) errChan := make(chan error, buffer) @@ -174,9 +178,6 @@ func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { c.subscriber.Lock() defer c.subscriber.Unlock() - if c.subscriber.updateCh == nil { - return - } if c.shouldIgnore(path) { return diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go index 26257860..5b408d58 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -1,3 +1,17 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package dbus import ( @@ -11,7 +25,6 @@ type SubscriptionSet struct { conn *Conn } - func (s *SubscriptionSet) filter(unit string) bool { return !s.Contains(unit) } @@ -21,12 +34,24 @@ func (s *SubscriptionSet) filter(unit string) bool { func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { // TODO: Make fully evented by using systemd 209 with properties changed values return s.conn.SubscribeUnitsCustom(time.Second, 0, - func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, + mismatchUnitStatus, func(unit string) bool { return s.filter(unit) }, ) } // NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() (*SubscriptionSet) { +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { return &SubscriptionSet{newSet(), conn} } + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set_test.go deleted file mode 100644 index db600850..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription_set_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package dbus - -import ( - "testing" - "time" -) - -// TestSubscribeUnit exercises the basics of subscription of a particular unit. 
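// ---- Editor's note (not part of the patch) ----
// The removed test below drove NewSubscriptionSet against the old
// string-returning StartUnit; under the new API the same flow looks
// roughly like this (unit name hypothetical, channel nil because the
// job result is not awaited):
//
//	subSet := conn.NewSubscriptionSet()
//	evCh, errCh := subSet.Subscribe()
//	subSet.Add("foo.service")
//	_, err := conn.StartUnit("foo.service", "replace", nil)
// -----------------------------------------------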
-func TestSubscriptionSetUnit(t *testing.T) { - target := "subscribe-events-set.service" - - conn, err := New() - - if err != nil { - t.Fatal(err) - } - - err = conn.Subscribe() - if err != nil { - t.Fatal(err) - } - - subSet := conn.NewSubscriptionSet() - evChan, errChan := subSet.Subscribe() - - subSet.Add(target) - setupUnit(target, conn, t) - - job, err := conn.StartUnit(target, "replace") - if err != nil { - t.Fatal(err) - } - - if job != "done" { - t.Fatal("Couldn't start", target) - } - - timeout := make(chan bool, 1) - go func() { - time.Sleep(3 * time.Second) - close(timeout) - }() - - for { - select { - case changes := <-evChan: - tCh, ok := changes[target] - - if !ok { - t.Fatal("Unexpected event %v", changes) - } - - if tCh.ActiveState == "active" && tCh.Name == target { - goto success - } - case err = <-errChan: - t.Fatal(err) - case <-timeout: - t.Fatal("Reached timeout") - } - } - -success: - return -} - - diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_test.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_test.go deleted file mode 100644 index 6f4d0b32..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package dbus - -import ( - "testing" - "time" -) - -// TestSubscribe exercises the basics of subscription -func TestSubscribe(t *testing.T) { - conn, err := New() - - if err != nil { - t.Fatal(err) - } - - err = conn.Subscribe() - if err != nil { - t.Fatal(err) - } - - err = conn.Unsubscribe() - if err != nil { - t.Fatal(err) - } -} - -// TestSubscribeUnit exercises the basics of subscription of a particular unit. -func TestSubscribeUnit(t *testing.T) { - target := "subscribe-events.service" - - conn, err := New() - - if err != nil { - t.Fatal(err) - } - - err = conn.Subscribe() - if err != nil { - t.Fatal(err) - } - - err = conn.Unsubscribe() - if err != nil { - t.Fatal(err) - } - - evChan, errChan := conn.SubscribeUnits(time.Second) - - setupUnit(target, conn, t) - - job, err := conn.StartUnit(target, "replace") - if err != nil { - t.Fatal(err) - } - - if job != "done" { - t.Fatal("Couldn't start", target) - } - - timeout := make(chan bool, 1) - go func() { - time.Sleep(3 * time.Second) - close(timeout) - }() - - for { - select { - case changes := <-evChan: - tCh, ok := changes[target] - - // Just continue until we see our event. - if !ok { - continue - } - - if tCh.ActiveState == "active" && tCh.Name == target { - goto success - } - case err = <-errChan: - t.Fatal(err) - case <-timeout: - t.Fatal("Reached timeout") - } - } - -success: - return -} - - diff --git a/vendor/github.com/coreos/go-systemd/test b/vendor/github.com/coreos/go-systemd/test index 6e043658..bc1b9859 100755 --- a/vendor/github.com/coreos/go-systemd/test +++ b/vendor/github.com/coreos/go-systemd/test @@ -1,3 +1,76 @@ -#!/bin/sh -e +#!/bin/bash -e +# +# Run all tests +# ./test +# ./test -v +# +# Run tests for one package +# PKG=./foo ./test +# PKG=bar ./test +# -go test -v ./... +# Invoke ./cover for HTML output +COVER=${COVER:-"-cover"} + +PROJ="go-systemd" +ORG_PATH="github.com/coreos" +REPO_PATH="${ORG_PATH}/${PROJ}" + +# As a convenience, set up a self-contained GOPATH if none set +if [ -z "$GOPATH" ]; then + if [ ! -h gopath/src/${REPO_PATH} ]; then + mkdir -p gopath/src/${ORG_PATH} + ln -s ../../../.. 
gopath/src/${REPO_PATH} || exit 255 + fi + export GOPATH=${PWD}/gopath + go get -u github.com/godbus/dbus +fi + +TESTABLE="activation journal login1 machine1 unit" +FORMATTABLE="$TESTABLE sdjournal dbus" +if [ -e "/run/systemd/system/" ]; then + TESTABLE="${TESTABLE} sdjournal" + if [ "$EUID" == "0" ]; then + # testing actual systemd behaviour requires root + TESTABLE="${TESTABLE} dbus" + fi +fi + + +# user has not provided PKG override +if [ -z "$PKG" ]; then + TEST=$TESTABLE + FMT=$FORMATTABLE + +# user has provided PKG override +else + # strip out slashes and dots from PKG=./foo/ + TEST=${PKG//\//} + TEST=${TEST//./} + + # only run gofmt on packages provided by user + FMT="$TEST" +fi + +# split TEST into an array and prepend REPO_PATH to each local package +split=(${TEST// / }) +TEST=${split[@]/#/${REPO_PATH}/} + +echo "Running tests..." +go test ${COVER} $@ ${TEST} + +echo "Checking gofmt..." +fmtRes=$(gofmt -l $FMT) +if [ -n "${fmtRes}" ]; then + echo -e "gofmt checking failed:\n${fmtRes}" + exit 255 +fi + +echo "Checking govet..." +vetRes=$(go vet $TEST) +if [ -n "${vetRes}" ]; then + echo -e "govet checking failed:\n${vetRes}" + exit 255 +fi + +echo "Success" diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go new file mode 100644 index 00000000..33832a1e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/util/util.go @@ -0,0 +1,33 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package util contains utility functions related to systemd that applications +// can use to check things like whether systemd is running. +package util + +import ( + "os" +) + +// IsRunningSystemd checks whether the host was booted with systemd as its init +// system. This functions similar to systemd's `sd_booted(3)`: internally, it +// checks whether /run/systemd/system/ exists and is a directory. +// http://www.freedesktop.org/software/systemd/man/sd_booted.html +func IsRunningSystemd() bool { + fi, err := os.Lstat("/run/systemd/system") + if err != nil { + return false + } + return fi.IsDir() +} diff --git a/vendor/github.com/coreos/yaml/decode_test.go b/vendor/github.com/coreos/yaml/decode_test.go deleted file mode 100644 index 349bee7d..00000000 --- a/vendor/github.com/coreos/yaml/decode_test.go +++ /dev/null @@ -1,720 +0,0 @@ -package yaml_test - -import ( - "github.com/coreos/yaml" - . 
"gopkg.in/check.v1" - "math" - "reflect" - "strings" - "time" -) - -var unmarshalIntTest = 123 - -var unmarshalTests = []struct { - data string - value interface{} -}{ - { - "", - &struct{}{}, - }, { - "{}", &struct{}{}, - }, { - "v: hi", - map[string]string{"v": "hi"}, - }, { - "v: hi", map[string]interface{}{"v": "hi"}, - }, { - "v: true", - map[string]string{"v": "true"}, - }, { - "v: true", - map[string]interface{}{"v": true}, - }, { - "v: 10", - map[string]interface{}{"v": 10}, - }, { - "v: 0b10", - map[string]interface{}{"v": 2}, - }, { - "v: 0xA", - map[string]interface{}{"v": 10}, - }, { - "v: 4294967296", - map[string]int64{"v": 4294967296}, - }, { - "v: 0.1", - map[string]interface{}{"v": 0.1}, - }, { - "v: .1", - map[string]interface{}{"v": 0.1}, - }, { - "v: .Inf", - map[string]interface{}{"v": math.Inf(+1)}, - }, { - "v: -.Inf", - map[string]interface{}{"v": math.Inf(-1)}, - }, { - "v: -10", - map[string]interface{}{"v": -10}, - }, { - "v: -.1", - map[string]interface{}{"v": -0.1}, - }, - - // Simple values. - { - "123", - &unmarshalIntTest, - }, - - // Floats from spec - { - "canonical: 6.8523e+5", - map[string]interface{}{"canonical": 6.8523e+5}, - }, { - "expo: 685.230_15e+03", - map[string]interface{}{"expo": 685.23015e+03}, - }, { - "fixed: 685_230.15", - map[string]interface{}{"fixed": 685230.15}, - }, { - "neginf: -.inf", - map[string]interface{}{"neginf": math.Inf(-1)}, - }, { - "fixed: 685_230.15", - map[string]float64{"fixed": 685230.15}, - }, - //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported - //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. - - // Bools from spec - { - "canonical: y", - map[string]interface{}{"canonical": true}, - }, { - "answer: NO", - map[string]interface{}{"answer": false}, - }, { - "logical: True", - map[string]interface{}{"logical": true}, - }, { - "option: on", - map[string]interface{}{"option": true}, - }, { - "option: on", - map[string]bool{"option": true}, - }, - // Ints from spec - { - "canonical: 685230", - map[string]interface{}{"canonical": 685230}, - }, { - "decimal: +685_230", - map[string]interface{}{"decimal": 685230}, - }, { - "octal: 02472256", - map[string]interface{}{"octal": 685230}, - }, { - "hexa: 0x_0A_74_AE", - map[string]interface{}{"hexa": 685230}, - }, { - "bin: 0b1010_0111_0100_1010_1110", - map[string]interface{}{"bin": 685230}, - }, { - "bin: -0b101010", - map[string]interface{}{"bin": -42}, - }, { - "decimal: +685_230", - map[string]int{"decimal": 685230}, - }, - - //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported - - // Nulls from spec - { - "empty:", - map[string]interface{}{"empty": nil}, - }, { - "canonical: ~", - map[string]interface{}{"canonical": nil}, - }, { - "english: null", - map[string]interface{}{"english": nil}, - }, { - "~: null key", - map[interface{}]string{nil: "null key"}, - }, { - "empty:", - map[string]*bool{"empty": nil}, - }, - - // Flow sequence - { - "seq: [A,B]", - map[string]interface{}{"seq": []interface{}{"A", "B"}}, - }, { - "seq: [A,B,C,]", - map[string][]string{"seq": []string{"A", "B", "C"}}, - }, { - "seq: [A,1,C]", - map[string][]string{"seq": []string{"A", "1", "C"}}, - }, { - "seq: [A,1,C]", - map[string][]int{"seq": []int{1}}, - }, { - "seq: [A,1,C]", - map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, - }, - // Block sequence - { - "seq:\n - A\n - B", - map[string]interface{}{"seq": []interface{}{"A", "B"}}, - }, { - "seq:\n - A\n - B\n - C", - map[string][]string{"seq": 
[]string{"A", "B", "C"}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string][]string{"seq": []string{"A", "1", "C"}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string][]int{"seq": []int{1}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, - }, - - // Literal block scalar - { - "scalar: | # Comment\n\n literal\n\n \ttext\n\n", - map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, - }, - - // Folded block scalar - { - "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", - map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, - }, - - // Map inside interface with no type hints. - { - "a: {b: c}", - map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, - }, - - // Structs and type conversions. - { - "hello: world", - &struct{ Hello string }{"world"}, - }, { - "a: {b: c}", - &struct{ A struct{ B string } }{struct{ B string }{"c"}}, - }, { - "a: {b: c}", - &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, - }, { - "a: {b: c}", - &struct{ A map[string]string }{map[string]string{"b": "c"}}, - }, { - "a: {b: c}", - &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, - }, { - "a:", - &struct{ A map[string]string }{}, - }, { - "a: 1", - &struct{ A int }{1}, - }, { - "a: 1", - &struct{ A float64 }{1}, - }, { - "a: 1.0", - &struct{ A int }{1}, - }, { - "a: 1.0", - &struct{ A uint }{1}, - }, { - "a: [1, 2]", - &struct{ A []int }{[]int{1, 2}}, - }, { - "a: 1", - &struct{ B int }{0}, - }, { - "a: 1", - &struct { - B int "a" - }{1}, - }, { - "a: y", - &struct{ A bool }{true}, - }, - - // Some cross type conversions - { - "v: 42", - map[string]uint{"v": 42}, - }, { - "v: -42", - map[string]uint{}, - }, { - "v: 4294967296", - map[string]uint64{"v": 4294967296}, - }, { - "v: -4294967296", - map[string]uint64{}, - }, - - // Overflow cases. - { - "v: 4294967297", - map[string]int32{}, - }, { - "v: 128", - map[string]int8{}, - }, - - // Quoted values. - { - "'1': '\"2\"'", - map[interface{}]interface{}{"1": "\"2\""}, - }, { - "v:\n- A\n- 'B\n\n C'\n", - map[string][]string{"v": []string{"A", "B\nC"}}, - }, - - // Explicit tags. - { - "v: !!float '1.1'", - map[string]interface{}{"v": 1.1}, - }, { - "v: !!null ''", - map[string]interface{}{"v": nil}, - }, { - "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", - map[string]interface{}{"v": 1}, - }, - - // Anchors and aliases. 
- { - "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", - &struct{ A, B, C, D int }{1, 2, 1, 2}, - }, { - "a: &a {c: 1}\nb: *a", - &struct { - A, B struct { - C int - } - }{struct{ C int }{1}, struct{ C int }{1}}, - }, { - "a: &a [1, 2]\nb: *a", - &struct{ B []int }{[]int{1, 2}}, - }, - - // Bug #1133337 - { - "foo: ''", - map[string]*string{"foo": new(string)}, - }, { - "foo: null", - map[string]string{"foo": ""}, - }, { - "foo: null", - map[string]interface{}{"foo": nil}, - }, - - // Ignored field - { - "a: 1\nb: 2\n", - &struct { - A int - B int "-" - }{1, 0}, - }, - - // Bug #1191981 - { - "" + - "%YAML 1.1\n" + - "--- !!str\n" + - `"Generic line break (no glyph)\n\` + "\n" + - ` Generic line break (glyphed)\n\` + "\n" + - ` Line separator\u2028\` + "\n" + - ` Paragraph separator\u2029"` + "\n", - "" + - "Generic line break (no glyph)\n" + - "Generic line break (glyphed)\n" + - "Line separator\u2028Paragraph separator\u2029", - }, - - // Struct inlining - { - "a: 1\nb: 2\nc: 3\n", - &struct { - A int - C inlineB `yaml:",inline"` - }{1, inlineB{2, inlineC{3}}}, - }, - - // bug 1243827 - { - "a: -b_c", - map[string]interface{}{"a": "-b_c"}, - }, - { - "a: +b_c", - map[string]interface{}{"a": "+b_c"}, - }, - { - "a: 50cent_of_dollar", - map[string]interface{}{"a": "50cent_of_dollar"}, - }, - - // Duration - { - "a: 3s", - map[string]time.Duration{"a": 3 * time.Second}, - }, - - // Issue #24. - { - "a: ", - map[string]string{"a": ""}, - }, - - // Base 60 floats are obsolete and unsupported. - { - "a: 1:1\n", - map[string]string{"a": "1:1"}, - }, - - // Binary data. - { - "a: !!binary gIGC\n", - map[string]string{"a": "\x80\x81\x82"}, - }, { - "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", - map[string]string{"a": strings.Repeat("\x90", 54)}, - }, { - "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", - map[string]string{"a": strings.Repeat("\x00", 52)}, - }, -} - -type inlineB struct { - B int - inlineC `yaml:",inline"` -} - -type inlineC struct { - C int -} - -func (s *S) TestUnmarshal(c *C) { - for i, item := range unmarshalTests { - t := reflect.ValueOf(item.value).Type() - var value interface{} - switch t.Kind() { - case reflect.Map: - value = reflect.MakeMap(t).Interface() - case reflect.String: - t := reflect.ValueOf(item.value).Type() - v := reflect.New(t) - value = v.Interface() - default: - pt := reflect.ValueOf(item.value).Type() - pv := reflect.New(pt.Elem()) - value = pv.Interface() - } - err := yaml.Unmarshal([]byte(item.data), value) - c.Assert(err, IsNil, Commentf("Item #%d", i)) - if t.Kind() == reflect.String { - c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i)) - } else { - c.Assert(value, DeepEquals, item.value, Commentf("Item #%d", i)) - } - } -} - -func (s *S) TestUnmarshalNaN(c *C) { - value := map[string]interface{}{} - err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) - c.Assert(err, IsNil) - c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) -} - -var unmarshalErrorTests = []struct { - data, error string -}{ - {"v: !!float 'error'", "YAML error: cannot decode !!str `error` as a !!float"}, - {"v: [A,", "YAML error: line 1: did not find expected node content"}, - {"v:\n- [A,", "YAML error: line 2: did not find expected node content"}, - {"a: *b\n", "YAML error: Unknown anchor 'b' referenced"}, - {"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"}, - {"value: -", "YAML error: block sequence entries are not allowed in this context"}, - {"a: !!binary ==", "YAML error: !!binary value contains invalid base64 
data"}, - {"{[.]}", `YAML error: invalid map key: \[\]interface \{\}\{"\."\}`}, - {"{{.}}", `YAML error: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, -} - -func (s *S) TestUnmarshalErrors(c *C) { - for _, item := range unmarshalErrorTests { - var value interface{} - err := yaml.Unmarshal([]byte(item.data), &value) - c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) - } -} - -var setterTests = []struct { - data, tag string - value interface{} -}{ - {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, - {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, - {"_: 10", "!!int", 10}, - {"_: null", "!!null", nil}, - {`_: BAR!`, "!!str", "BAR!"}, - {`_: "BAR!"`, "!!str", "BAR!"}, - {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, -} - -var setterResult = map[int]bool{} - -type typeWithSetter struct { - tag string - value interface{} -} - -func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) { - o.tag = tag - o.value = value - if i, ok := value.(int); ok { - if result, ok := setterResult[i]; ok { - return result - } - } - return true -} - -type setterPointerType struct { - Field *typeWithSetter "_" -} - -type setterValueType struct { - Field typeWithSetter "_" -} - -func (s *S) TestUnmarshalWithPointerSetter(c *C) { - for _, item := range setterTests { - obj := &setterPointerType{} - err := yaml.Unmarshal([]byte(item.data), obj) - c.Assert(err, IsNil) - c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) - c.Assert(obj.Field.tag, Equals, item.tag) - c.Assert(obj.Field.value, DeepEquals, item.value) - } -} - -func (s *S) TestUnmarshalWithValueSetter(c *C) { - for _, item := range setterTests { - obj := &setterValueType{} - err := yaml.Unmarshal([]byte(item.data), obj) - c.Assert(err, IsNil) - c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) - c.Assert(obj.Field.tag, Equals, item.tag) - c.Assert(obj.Field.value, DeepEquals, item.value) - } -} - -func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { - obj := &typeWithSetter{} - err := yaml.Unmarshal([]byte(setterTests[0].data), obj) - c.Assert(err, IsNil) - c.Assert(obj.tag, Equals, setterTests[0].tag) - value, ok := obj.value.(map[interface{}]interface{}) - c.Assert(ok, Equals, true) - c.Assert(value["_"], DeepEquals, setterTests[0].value) -} - -func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) { - setterResult[2] = false - setterResult[4] = false - defer func() { - delete(setterResult, 2) - delete(setterResult, 4) - }() - - m := map[string]*typeWithSetter{} - data := `{abc: 1, def: 2, ghi: 3, jkl: 4}` - err := yaml.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - c.Assert(m["abc"], NotNil) - c.Assert(m["def"], IsNil) - c.Assert(m["ghi"], NotNil) - c.Assert(m["jkl"], IsNil) - - c.Assert(m["abc"].value, Equals, 1) - c.Assert(m["ghi"].value, Equals, 3) -} - -func (s *S) TestUnmarshalWithTransform(c *C) { - data := `{a_b: 1, c-d: 2, e-f_g: 3, h_i-j: 4}` - expect := map[string]int{ - "a_b": 1, - "c_d": 2, - "e_f_g": 3, - "h_i_j": 4, - } - m := map[string]int{} - yaml.UnmarshalMappingKeyTransform = func(i string) string { - return strings.Replace(i, "-", "_", -1) - } - err := yaml.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - c.Assert(m, DeepEquals, expect) -} - -// From http://yaml.org/type/merge.html -var mergeTests = ` -anchors: - - &CENTER { "x": 1, "y": 2 } - - &LEFT { "x": 0, "y": 2 } - - &BIG { "r": 10 } - - &SMALL { "r": 1 } - -# All the following maps are equal: - 
-plain: - # Explicit keys - "x": 1 - "y": 2 - "r": 10 - label: center/big - -mergeOne: - # Merge one map - << : *CENTER - "r": 10 - label: center/big - -mergeMultiple: - # Merge multiple maps - << : [ *CENTER, *BIG ] - label: center/big - -override: - # Override - << : [ *BIG, *LEFT, *SMALL ] - "x": 1 - label: center/big - -shortTag: - # Explicit short merge tag - !!merge "<<" : [ *CENTER, *BIG ] - label: center/big - -longTag: - # Explicit merge long tag - ! "<<" : [ *CENTER, *BIG ] - label: center/big - -inlineMap: - # Inlined map - << : {"x": 1, "y": 2, "r": 10} - label: center/big - -inlineSequenceMap: - # Inlined map in sequence - << : [ *CENTER, {"r": 10} ] - label: center/big -` - -func (s *S) TestMerge(c *C) { - var want = map[interface{}]interface{}{ - "x": 1, - "y": 2, - "r": 10, - "label": "center/big", - } - - var m map[string]interface{} - err := yaml.Unmarshal([]byte(mergeTests), &m) - c.Assert(err, IsNil) - for name, test := range m { - if name == "anchors" { - continue - } - c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) - } -} - -func (s *S) TestMergeStruct(c *C) { - type Data struct { - X, Y, R int - Label string - } - want := Data{1, 2, 10, "center/big"} - - var m map[string]Data - err := yaml.Unmarshal([]byte(mergeTests), &m) - c.Assert(err, IsNil) - for name, test := range m { - if name == "anchors" { - continue - } - c.Assert(test, Equals, want, Commentf("test %q failed", name)) - } -} - -var unmarshalNullTests = []func() interface{}{ - func() interface{} { var v interface{}; v = "v"; return &v }, - func() interface{} { var s = "s"; return &s }, - func() interface{} { var s = "s"; sptr := &s; return &sptr }, - func() interface{} { var i = 1; return &i }, - func() interface{} { var i = 1; iptr := &i; return &iptr }, - func() interface{} { m := map[string]int{"s": 1}; return &m }, - func() interface{} { m := map[string]int{"s": 1}; return m }, -} - -func (s *S) TestUnmarshalNull(c *C) { - for _, test := range unmarshalNullTests { - item := test() - zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() - err := yaml.Unmarshal([]byte("null"), item) - c.Assert(err, IsNil) - if reflect.TypeOf(item).Kind() == reflect.Map { - c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) - } else { - c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) - } - } -} - -//var data []byte -//func init() { -// var err error -// data, err = ioutil.ReadFile("/tmp/file.yaml") -// if err != nil { -// panic(err) -// } -//} -// -//func (s *S) BenchmarkUnmarshal(c *C) { -// var err error -// for i := 0; i < c.N; i++ { -// var v map[string]interface{} -// err = yaml.Unmarshal(data, &v) -// } -// if err != nil { -// panic(err) -// } -//} -// -//func (s *S) BenchmarkMarshal(c *C) { -// var v map[string]interface{} -// yaml.Unmarshal(data, &v) -// c.ResetTimer() -// for i := 0; i < c.N; i++ { -// yaml.Marshal(&v) -// } -//} diff --git a/vendor/github.com/coreos/yaml/encode_test.go b/vendor/github.com/coreos/yaml/encode_test.go deleted file mode 100644 index 2cd0ea7f..00000000 --- a/vendor/github.com/coreos/yaml/encode_test.go +++ /dev/null @@ -1,433 +0,0 @@ -package yaml_test - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" - - "github.com/coreos/yaml" - . 
"gopkg.in/check.v1" -) - -var marshalIntTest = 123 - -var marshalTests = []struct { - value interface{} - data string -}{ - { - nil, - "null\n", - }, { - &struct{}{}, - "{}\n", - }, { - map[string]string{"v": "hi"}, - "v: hi\n", - }, { - map[string]interface{}{"v": "hi"}, - "v: hi\n", - }, { - map[string]string{"v": "true"}, - "v: \"true\"\n", - }, { - map[string]string{"v": "false"}, - "v: \"false\"\n", - }, { - map[string]interface{}{"v": true}, - "v: true\n", - }, { - map[string]interface{}{"v": false}, - "v: false\n", - }, { - map[string]interface{}{"v": 10}, - "v: 10\n", - }, { - map[string]interface{}{"v": -10}, - "v: -10\n", - }, { - map[string]uint{"v": 42}, - "v: 42\n", - }, { - map[string]interface{}{"v": int64(4294967296)}, - "v: 4294967296\n", - }, { - map[string]int64{"v": int64(4294967296)}, - "v: 4294967296\n", - }, { - map[string]uint64{"v": 4294967296}, - "v: 4294967296\n", - }, { - map[string]interface{}{"v": "10"}, - "v: \"10\"\n", - }, { - map[string]interface{}{"v": 0.1}, - "v: 0.1\n", - }, { - map[string]interface{}{"v": float64(0.1)}, - "v: 0.1\n", - }, { - map[string]interface{}{"v": -0.1}, - "v: -0.1\n", - }, { - map[string]interface{}{"v": math.Inf(+1)}, - "v: .inf\n", - }, { - map[string]interface{}{"v": math.Inf(-1)}, - "v: -.inf\n", - }, { - map[string]interface{}{"v": math.NaN()}, - "v: .nan\n", - }, { - map[string]interface{}{"v": nil}, - "v: null\n", - }, { - map[string]interface{}{"v": ""}, - "v: \"\"\n", - }, { - map[string][]string{"v": []string{"A", "B"}}, - "v:\n- A\n- B\n", - }, { - map[string][]string{"v": []string{"A", "B\nC"}}, - "v:\n- A\n- |-\n B\n C\n", - }, { - map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, - "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", - }, { - map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, - "a:\n b: c\n", - }, { - map[string]interface{}{"a": "-"}, - "a: '-'\n", - }, - - // Simple values. 
- { - &marshalIntTest, - "123\n", - }, - - // Structures - { - &struct{ Hello string }{"world"}, - "hello: world\n", - }, { - &struct { - A struct { - B string - } - }{struct{ B string }{"c"}}, - "a:\n b: c\n", - }, { - &struct { - A *struct { - B string - } - }{&struct{ B string }{"c"}}, - "a:\n b: c\n", - }, { - &struct { - A *struct { - B string - } - }{}, - "a: null\n", - }, { - &struct{ A int }{1}, - "a: 1\n", - }, { - &struct{ A []int }{[]int{1, 2}}, - "a:\n- 1\n- 2\n", - }, { - &struct { - B int "a" - }{1}, - "a: 1\n", - }, { - &struct{ A bool }{true}, - "a: true\n", - }, - - // Conditional flag - { - &struct { - A int "a,omitempty" - B int "b,omitempty" - }{1, 0}, - "a: 1\n", - }, { - &struct { - A int "a,omitempty" - B int "b,omitempty" - }{0, 0}, - "{}\n", - }, { - &struct { - A *struct{ X int } "a,omitempty" - B int "b,omitempty" - }{nil, 0}, - "{}\n", - }, - - // Flow flag - { - &struct { - A []int "a,flow" - }{[]int{1, 2}}, - "a: [1, 2]\n", - }, { - &struct { - A map[string]string "a,flow" - }{map[string]string{"b": "c", "d": "e"}}, - "a: {b: c, d: e}\n", - }, { - &struct { - A struct { - B, D string - } "a,flow" - }{struct{ B, D string }{"c", "e"}}, - "a: {b: c, d: e}\n", - }, - - // Unexported field - { - &struct { - u int - A int - }{0, 1}, - "a: 1\n", - }, - - // Ignored field - { - &struct { - A int - B int "-" - }{1, 2}, - "a: 1\n", - }, - - // Struct inlining - { - &struct { - A int - C inlineB `yaml:",inline"` - }{1, inlineB{2, inlineC{3}}}, - "a: 1\nb: 2\nc: 3\n", - }, - - // Duration - { - map[string]time.Duration{"a": 3 * time.Second}, - "a: 3s\n", - }, - - // Issue #24: bug in map merging logic. - { - map[string]string{"a": ""}, - "a: \n", - }, - - // Issue #34: marshal unsupported base 60 floats quoted for compatibility - // with old YAML 1.1 parsers. - { - map[string]string{"a": "1:1"}, - "a: \"1:1\"\n", - }, - - // Binary data. - { - map[string]string{"a": "\x00"}, - "a: \"\\0\"\n", - }, { - map[string]string{"a": "\x80\x81\x82"}, - "a: !!binary gIGC\n", - }, { - map[string]string{"a": strings.Repeat("\x90", 54)}, - "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", - }, { - map[string]interface{}{"a": typeWithGetter{"!!str", "\x80\x81\x82"}}, - "a: !!binary gIGC\n", - }, - - // Escaping of tags. - { - map[string]interface{}{"a": typeWithGetter{"foo!bar", 1}}, - "a: ! 
1\n", - }, -} - -func (s *S) TestMarshal(c *C) { - for _, item := range marshalTests { - data, err := yaml.Marshal(item.value) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data) - } -} - -var marshalErrorTests = []struct { - value interface{} - error string - panic string -}{{ - value: &struct { - B int - inlineB ",inline" - }{1, inlineB{2, inlineC{3}}}, - panic: `Duplicated key 'b' in struct struct \{ B int; .*`, -}, { - value: typeWithGetter{"!!binary", "\x80"}, - error: "YAML error: explicitly tagged !!binary data must be base64-encoded", -}, { - value: typeWithGetter{"!!float", "\x80"}, - error: `YAML error: cannot marshal invalid UTF-8 data as !!float`, -}} - -func (s *S) TestMarshalErrors(c *C) { - for _, item := range marshalErrorTests { - if item.panic != "" { - c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) - } else { - _, err := yaml.Marshal(item.value) - c.Assert(err, ErrorMatches, item.error) - } - } -} - -var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"} - -var getterTests = []struct { - data, tag string - value interface{} -}{ - {"_:\n hi: there\n", "", map[interface{}]interface{}{"hi": "there"}}, - {"_:\n- 1\n- A\n", "", []interface{}{1, "A"}}, - {"_: 10\n", "", 10}, - {"_: null\n", "", nil}, - {"_: !foo BAR!\n", "!foo", "BAR!"}, - {"_: !foo 1\n", "!foo", "1"}, - {"_: !foo '\"1\"'\n", "!foo", "\"1\""}, - {"_: !foo 1.1\n", "!foo", 1.1}, - {"_: !foo 1\n", "!foo", 1}, - {"_: !foo 1\n", "!foo", uint(1)}, - {"_: !foo true\n", "!foo", true}, - {"_: !foo\n- A\n- B\n", "!foo", []string{"A", "B"}}, - {"_: !foo\n A: B\n", "!foo", map[string]string{"A": "B"}}, - {"_: !foo\n a: B\n", "!foo", &marshalTaggedIfaceTest}, -} - -func (s *S) TestMarshalTypeCache(c *C) { - var data []byte - var err error - func() { - type T struct{ A int } - data, err = yaml.Marshal(&T{}) - c.Assert(err, IsNil) - }() - func() { - type T struct{ B int } - data, err = yaml.Marshal(&T{}) - c.Assert(err, IsNil) - }() - c.Assert(string(data), Equals, "b: 0\n") -} - -type typeWithGetter struct { - tag string - value interface{} -} - -func (o typeWithGetter) GetYAML() (tag string, value interface{}) { - return o.tag, o.value -} - -type typeWithGetterField struct { - Field typeWithGetter "_" -} - -func (s *S) TestMashalWithGetter(c *C) { - for _, item := range getterTests { - obj := &typeWithGetterField{} - obj.Field.tag = item.tag - obj.Field.value = item.value - data, err := yaml.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, string(item.data)) - } -} - -func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) { - obj := &typeWithGetter{} - obj.tag = "" - obj.value = map[string]string{"hello": "world!"} - data, err := yaml.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "hello: world!\n") -} - -func (s *S) TestSortedOutput(c *C) { - order := []interface{}{ - false, - true, - 1, - uint(1), - 1.0, - 1.1, - 1.2, - 2, - uint(2), - 2.0, - 2.1, - "", - ".1", - ".2", - ".a", - "1", - "2", - "a!10", - "a/2", - "a/10", - "a~10", - "ab/1", - "b/1", - "b/01", - "b/2", - "b/02", - "b/3", - "b/03", - "b1", - "b01", - "b3", - "c2.10", - "c10.2", - "d1", - "d12", - "d12a", - } - m := make(map[interface{}]int) - for _, k := range order { - m[k] = 1 - } - data, err := yaml.Marshal(m) - c.Assert(err, IsNil) - out := "\n" + string(data) - last := 0 - for i, k := range order { - repr := fmt.Sprint(k) - if s, ok := k.(string); ok { - if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { - repr = `"` + repr + `"` - } - } - index 
:= strings.Index(out, "\n"+repr+":")
- if index == -1 {
- c.Fatalf("%#v is not in the output: %#v", k, out)
- }
- if index < last {
- c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
- }
- last = index
- }
-}
diff --git a/vendor/github.com/coreos/yaml/suite_test.go b/vendor/github.com/coreos/yaml/suite_test.go
deleted file mode 100644
index c5cf1ed4..00000000
--- a/vendor/github.com/coreos/yaml/suite_test.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package yaml_test
-
-import (
- . "gopkg.in/check.v1"
- "testing"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type S struct{}
-
-var _ = Suite(&S{})
diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go
deleted file mode 100644
index 39b7525b..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/common_test.go
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Copyright (c) 2013 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew_test
-
-import (
- "fmt"
- "reflect"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
-)
-
-// custom type to test Stringer interface on non-pointer receiver.
-type stringer string
-
-// String implements the Stringer interface for testing invocation of custom
-// stringers on types with non-pointer receivers.
-func (s stringer) String() string {
- return "stringer " + string(s)
-}
-
-// custom type to test Stringer interface on pointer receiver.
-type pstringer string
-
-// String implements the Stringer interface for testing invocation of custom
-// stringers on types with only pointer receivers.
-func (s *pstringer) String() string {
- return "stringer " + string(*s)
-}
-
-// xref1 and xref2 are cross referencing structs for testing circular reference
-// detection.
-type xref1 struct {
- ps2 *xref2
-}
-type xref2 struct {
- ps1 *xref1
-}
-
-// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
-// reference for testing detection.
-type indirCir1 struct {
- ps2 *indirCir2
-}
-type indirCir2 struct {
- ps3 *indirCir3
-}
-type indirCir3 struct {
- ps1 *indirCir1
-}
-
-// embed is used to test embedded structures.
-type embed struct {
- a string
-}
-
-// embedwrap is used to test embedded structures.
-type embedwrap struct {
- *embed
- e *embed
-}
-
-// panicer is used to intentionally cause a panic for testing spew properly
-// handles them.
-type panicer int
-
-func (p panicer) String() string {
- panic("test panic")
-}
-
-// customError is used to test custom error interface invocation.
-type customError int
-
-func (e customError) Error() string {
- return fmt.Sprintf("error: %d", int(e))
-}
-
-// stringizeWants converts a slice of wanted test output into a format suitable
-// for a test error message.
-func stringizeWants(wants []string) string {
- s := ""
- for i, want := range wants {
- if i > 0 {
- s += fmt.Sprintf("want%d: %s", i+1, want)
- } else {
- s += "want: " + want
- }
- }
- return s
-}
-
-// testFailed returns whether or not a test failed by checking if the result
-// of the test is in the slice of wanted strings.
-func testFailed(result string, wants []string) bool {
- for _, want := range wants {
- if result == want {
- return false
- }
- }
- return true
-}
-
-type sortableStruct struct {
- x int
-}
-
-func (ss sortableStruct) String() string {
- return fmt.Sprintf("ss.%d", ss.x)
-}
-
-type unsortableStruct struct {
- x int
-}
-
-type sortTestCase struct {
- input []reflect.Value
- expected []reflect.Value
-}
-
-func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
- getInterfaces := func(values []reflect.Value) []interface{} {
- interfaces := []interface{}{}
- for _, v := range values {
- interfaces = append(interfaces, v.Interface())
- }
- return interfaces
- }
-
- for _, test := range tests {
- spew.SortValues(test.input, cs)
- // reflect.DeepEqual cannot really make sense of reflect.Value,
- // probably because of all the pointer tricks. For instance,
- // v(2.0) != v(2.0) on a 32-bit system. Turn them into interface{}
- // instead.
- input := getInterfaces(test.input)
- expected := getInterfaces(test.expected)
- if !reflect.DeepEqual(input, expected) {
- t.Errorf("Sort mismatch:\n %v != %v", input, expected)
- }
- }
-}
-
-// TestSortValues ensures the sort functionality for reflect.Value based sorting
-// works as intended.
-func TestSortValues(t *testing.T) {
- v := reflect.ValueOf
-
- a := v("a")
- b := v("b")
- c := v("c")
- embedA := v(embed{"a"})
- embedB := v(embed{"b"})
- embedC := v(embed{"c"})
- tests := []sortTestCase{
- // No values.
- {
- []reflect.Value{},
- []reflect.Value{},
- },
- // Bools.
- {
- []reflect.Value{v(false), v(true), v(false)},
- []reflect.Value{v(false), v(false), v(true)},
- },
- // Ints.
- {
- []reflect.Value{v(2), v(1), v(3)},
- []reflect.Value{v(1), v(2), v(3)},
- },
- // Uints.
- {
- []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
- []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
- },
- // Floats.
- {
- []reflect.Value{v(2.0), v(1.0), v(3.0)},
- []reflect.Value{v(1.0), v(2.0), v(3.0)},
- },
- // Strings.
- {
- []reflect.Value{b, a, c},
- []reflect.Value{a, b, c},
- },
- // Arrays.
- {
- []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
- []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
- },
- // Uintptrs.
- {
- []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
- []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
- },
- // SortableStructs.
- {
- // Note: not sorted - DisableMethods is set.
- []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
- []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
- },
- // UnsortableStructs.
- {
- // Note: not sorted - SpewKeys is false.
- []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
- []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
- },
- // Invalid.
- {
- []reflect.Value{embedB, embedA, embedC},
- []reflect.Value{embedB, embedA, embedC},
- },
- }
- cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
- helpTestSortValues(tests, &cs, t)
-}
-
-// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
-// based sorting works as intended when using string methods.
-func TestSortValuesWithMethods(t *testing.T) {
- v := reflect.ValueOf
-
- a := v("a")
- b := v("b")
- c := v("c")
- tests := []sortTestCase{
- // Ints.
- {
- []reflect.Value{v(2), v(1), v(3)},
- []reflect.Value{v(1), v(2), v(3)},
- },
- // Strings.
- {
- []reflect.Value{b, a, c},
- []reflect.Value{a, b, c},
- },
- // SortableStructs.
- {
- []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
- []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
- },
- // UnsortableStructs.
- {
- // Note: not sorted - SpewKeys is false.
- []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
- []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
- },
- }
- cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
- helpTestSortValues(tests, &cs, t)
-}
-
-// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
-// based sorting works as intended when using spew to stringify keys.
-func TestSortValuesWithSpew(t *testing.T) {
- v := reflect.ValueOf
-
- a := v("a")
- b := v("b")
- c := v("c")
- tests := []sortTestCase{
- // Ints.
- {
- []reflect.Value{v(2), v(1), v(3)},
- []reflect.Value{v(1), v(2), v(3)},
- },
- // Strings.
- {
- []reflect.Value{b, a, c},
- []reflect.Value{a, b, c},
- },
- // SortableStructs.
- {
- []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
- []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
- },
- // UnsortableStructs.
- {
- []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
- []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
- },
- }
- cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
- helpTestSortValues(tests, &cs, t)
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go
deleted file mode 100644
index 2b320401..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/dump_test.go
+++ /dev/null
@@ -1,1042 +0,0 @@
-/*
- * Copyright (c) 2013 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-Test Summary:
-NOTE: For each test, a nil pointer, a single pointer and double pointer to the
-base test element are also tested to ensure proper indirection across all types.
- -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Array containing bytes -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Slice containing bytes -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer interface -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// dumpTest is used to describe a test to be perfomed against the Dump method. -type dumpTest struct { - in interface{} - wants []string -} - -// dumpTests houses all of the tests to be performed against the Dump method. -var dumpTests = make([]dumpTest, 0) - -// addDumpTest is a helper method to append the passed input and desired result -// to dumpTests -func addDumpTest(in interface{}, wants ...string) { - test := dumpTest{in, wants} - dumpTests = append(dumpTests, test) -} - -func addIntDumpTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max int16. - v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max int64. 
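// Aside: the string-building above obscures a simple shape. A hypothetical
// sketch of the format these cases assert (addresses vary per run, so only
// the shape is shown in the comments):
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	v := int8(127)
	pv := &v
	spew.Dump(v)   // (int8) 127
	spew.Dump(pv)  // (*int8)(0x...)(127)
	spew.Dump(&pv) // (**int8)(0x...->0x...)(127)
}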
- v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max int. - v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addUintDumpTests() { - // Max uint8. - v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max uint16. - v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max uint32. - v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max uint64. - v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max uint. - v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addBoolDumpTests() { - // Boolean true. - v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Boolean false. - v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFloatDumpTests() { - // Standard float32. 
- v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard float64. - v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addComplexDumpTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard complex128. - v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addArrayDumpTests() { - // Array containing standard ints. - v := [3]int{1, 2, 3} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + - vt + ") 2,\n (" + vt + ") 3\n}" - addDumpTest(v, "([3]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[3]"+vt+")()\n") - - // Array containing type with custom formatter on pointer receiver only. - v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := [3]pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + - ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + - ") (len=" + v2i2Len + ") " + "stringer 3\n}" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + - v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + - ") " + "\"3\"\n}" - } - addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") - addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") - addDumpTest(nv2, "(*[3]"+v2t+")()\n") - - // Array containing interfaces. 
- v3i0 := "one" - v3 := [3]interface{}{v3i0, int(2), uint(3)} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Array containing bytes. - v4 := [34]byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[34]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[34]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addSliceDumpTests() { - // Slice containing standard float32 values. - v := []float32{3.14, 6.28, 12.56} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + - vt + ") 6.28,\n (" + vt + ") 12.56\n}" - addDumpTest(v, "([]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[]"+vt+")()\n") - - // Slice containing type with custom formatter on pointer receiver only. - v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := []pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + - v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + - ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + - "stringer 3\n}" - addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*[]"+v2t+")()\n") - - // Slice containing interfaces. 
- v3i0 := "one" - v3 := []interface{}{v3i0, int(2), uint(3), nil} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3,\n (" + v3t5 + ") \n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Slice containing bytes. - v4 := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Nil slice. - v5 := []int(nil) - nv5 := (*[]int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "[]int" - v5s := "" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addStringDumpTests() { - // Standard string. - v := "test" - vLen := fmt.Sprintf("%d", len(v)) - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "(len=" + vLen + ") \"test\"" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addInterfaceDumpTests() { - // Nil interface. - var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Sub-interface. - v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addMapDumpTests() { - // Map with string keys and int vals. 
- k := "one" - kk := "two" - m := map[string]int{k: 1, kk: 2} - klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up - kkLen := fmt.Sprintf("%d", len(kk)) - mLen := fmt.Sprintf("%d", len(m)) - nilMap := map[string]int(nil) - nm := (*map[string]int)(nil) - pm := &m - mAddr := fmt.Sprintf("%p", pm) - pmAddr := fmt.Sprintf("%p", &pm) - mt := "map[string]int" - mt1 := "string" - mt2 := "int" - ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + - "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + - ") \"two\": (" + mt2 + ") 2\n}" - ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + - "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + - ") \"one\": (" + mt2 + ") 1\n}" - addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") - addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", - "(*"+mt+")("+mAddr+")("+ms2+")\n") - addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", - "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") - addDumpTest(nm, "(*"+mt+")()\n") - addDumpTest(nilMap, "("+mt+") \n") - - // Map with custom formatter type on pointer receiver only keys and vals. - k2 := pstringer("one") - v2 := pstringer("1") - m2 := map[pstringer]pstringer{k2: v2} - k2Len := fmt.Sprintf("%d", len(k2)) - v2Len := fmt.Sprintf("%d", len(v2)) - m2Len := fmt.Sprintf("%d", len(m2)) - nilMap2 := map[pstringer]pstringer(nil) - nm2 := (*map[pstringer]pstringer)(nil) - pm2 := &m2 - m2Addr := fmt.Sprintf("%p", pm2) - pm2Addr := fmt.Sprintf("%p", &pm2) - m2t := "map[spew_test.pstringer]spew_test.pstringer" - m2t1 := "spew_test.pstringer" - m2t2 := "spew_test.pstringer" - m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + - "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" - if spew.UnsafeDisabled { - m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + - ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + - ") \"1\"\n}" - } - addDumpTest(m2, "("+m2t+") "+m2s+"\n") - addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") - addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") - addDumpTest(nm2, "(*"+m2t+")()\n") - addDumpTest(nilMap2, "("+m2t+") \n") - - // Map with interface keys and values. - k3 := "one" - k3Len := fmt.Sprintf("%d", len(k3)) - m3 := map[interface{}]interface{}{k3: 1} - m3Len := fmt.Sprintf("%d", len(m3)) - nilMap3 := map[interface{}]interface{}(nil) - nm3 := (*map[interface{}]interface{})(nil) - pm3 := &m3 - m3Addr := fmt.Sprintf("%p", pm3) - pm3Addr := fmt.Sprintf("%p", &pm3) - m3t := "map[interface {}]interface {}" - m3t1 := "string" - m3t2 := "int" - m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + - "\"one\": (" + m3t2 + ") 1\n}" - addDumpTest(m3, "("+m3t+") "+m3s+"\n") - addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") - addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") - addDumpTest(nm3, "(*"+m3t+")()\n") - addDumpTest(nilMap3, "("+m3t+") \n") - - // Map with nil interface value. 
- k4 := "nil" - k4Len := fmt.Sprintf("%d", len(k4)) - m4 := map[string]interface{}{k4: nil} - m4Len := fmt.Sprintf("%d", len(m4)) - nilMap4 := map[string]interface{}(nil) - nm4 := (*map[string]interface{})(nil) - pm4 := &m4 - m4Addr := fmt.Sprintf("%p", pm4) - pm4Addr := fmt.Sprintf("%p", &pm4) - m4t := "map[string]interface {}" - m4t1 := "string" - m4t2 := "interface {}" - m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + - " \"nil\": (" + m4t2 + ") \n}" - addDumpTest(m4, "("+m4t+") "+m4s+"\n") - addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") - addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") - addDumpTest(nm4, "(*"+m4t+")()\n") - addDumpTest(nilMap4, "("+m4t+") \n") -} - -func addStructDumpTests() { - // Struct with primitives. - type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Struct that contains another struct. - type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + - v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. - type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + - ") (len=5) stringer test2\n}" - v3sp := v3s - if spew.UnsafeDisabled { - v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) \"test2\"\n}" - v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) stringer test2\n}" - } - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Struct that contains embedded struct and field to same struct. 
- e := embed{"embedstr"} - eLen := fmt.Sprintf("%d", len("embedstr")) - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + - ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + - ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + - " \"embedstr\"\n })\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addUintptrDumpTests() { - // Null pointer. - v := uintptr(0) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - nv2 := (*uintptr)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addUnsafePointerDumpTests() { - // Null pointer. - v := unsafe.Pointer(uintptr(0)) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Address of real variable. - i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addChanDumpTests() { - // Nil channel. - var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFuncDumpTests() { - // Function with no params and no returns. 
- v := addIntDumpTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Function with param and no returns. - v2 := TestDump - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Function with multiple params and multiple returns. - var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") -} - -func addCircularDumpTests() { - // Struct that is circular through self referencing. - type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + - vAddr + ")()\n })\n}" - vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") - - // Structs that are circular through cross referencing. - v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + - ")()\n })\n })\n}" - v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")()\n })\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") - - // Structs that are indirectly circular. 
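// Aside: the point of the circular cases above is that spew tracks
// pointers already being dumped and cuts recursion off on a revisit
// instead of descending forever. A hypothetical sketch:
package main

import "github.com/davecgh/go-spew/spew"

type ring struct {
	next *ring
}

func main() {
	r := ring{}
	r.next = &r
	spew.Dump(&r) // terminates: the nested *ring is printed once, then cut short
}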
- v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + - ")()\n })\n })\n })\n}" - v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")()\n })\n })\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") -} - -func addPanicDumpTests() { - // Type that panics in its Stringer interface. - v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addErrorDumpTests() { - // Type that has a custom Error interface. - v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -// TestDump executes all of the tests described by dumpTests. -func TestDump(t *testing.T) { - // Setup tests. 
- addIntDumpTests() - addUintDumpTests() - addBoolDumpTests() - addFloatDumpTests() - addComplexDumpTests() - addArrayDumpTests() - addSliceDumpTests() - addStringDumpTests() - addInterfaceDumpTests() - addMapDumpTests() - addStructDumpTests() - addUintptrDumpTests() - addUnsafePointerDumpTests() - addChanDumpTests() - addFuncDumpTests() - addCircularDumpTests() - addPanicDumpTests() - addErrorDumpTests() - addCgoDumpTests() - - t.Logf("Running %d tests", len(dumpTests)) - for i, test := range dumpTests { - buf := new(bytes.Buffer) - spew.Fdump(buf, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) - continue - } - } -} - -func TestDumpSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + - "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + - "(len=1) \"3\"\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "(map[spew_test.stringer]int) (len=3) {\n" + - "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if spew.UnsafeDisabled { - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + - "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + - "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + - "}\n" - } - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "(map[spew_test.customError]int) (len=3) {\n" + - "(spew_test.customError) error: 1: (int) 1,\n" + - "(spew_test.customError) error: 2: (int) 2,\n" + - "(spew_test.customError) error: 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go deleted file mode 100644 index 18a38358..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
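// Aside: TestDumpSortedKeys above exists because Go randomizes map
// iteration order; SortKeys opts in to deterministic output. Distilled,
// using only ConfigState and Sdump as the test does:
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := spew.ConfigState{SortKeys: true}
	fmt.Print(cfg.Sdump(map[int]string{3: "3", 1: "1", 2: "2"}))
	// keys always appear in the order 1, 2, 3
}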
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when both cgo is supported and "-tags testcgo" is added to the go test
-// command line. This means the cgo tests are only added (and hence run) when
-// specifically requested. This configuration is used because spew itself
-// does not require cgo to run even though it does handle certain cgo types
-// specially. Rather than forcing all clients to require cgo and an external
-// C compiler just to run the tests, this scheme makes them optional.
-// +build cgo,testcgo
-
-package spew_test
-
-import (
- "fmt"
-
- "github.com/davecgh/go-spew/spew/testdata"
-)
-
-func addCgoDumpTests() {
- // C char pointer.
- v := testdata.GetCgoCharPointer()
- nv := testdata.GetCgoNullCharPointer()
- pv := &v
- vcAddr := fmt.Sprintf("%p", v)
- vAddr := fmt.Sprintf("%p", pv)
- pvAddr := fmt.Sprintf("%p", &pv)
- vt := "*testdata._Ctype_char"
- vs := "116"
- addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
- addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
- addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
- addDumpTest(nv, "("+vt+")()\n")
-
- // C char array.
- v2, v2l, v2c := testdata.GetCgoCharArray()
- v2Len := fmt.Sprintf("%d", v2l)
- v2Cap := fmt.Sprintf("%d", v2c)
- v2t := "[6]testdata._Ctype_char"
- v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
- "{\n 00000000 74 65 73 74 32 00 " +
- " |test2.|\n}"
- addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-
- // C unsigned char array.
- v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
- v3Len := fmt.Sprintf("%d", v3l)
- v3Cap := fmt.Sprintf("%d", v3c)
- v3t := "[6]testdata._Ctype_unsignedchar"
- v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
- "{\n 00000000 74 65 73 74 33 00 " +
- " |test3.|\n}"
- addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-
- // C signed char array.
- v4, v4l, v4c := testdata.GetCgoSignedCharArray()
- v4Len := fmt.Sprintf("%d", v4l)
- v4Cap := fmt.Sprintf("%d", v4c)
- v4t := "[6]testdata._Ctype_schar"
- v4t2 := "testdata._Ctype_schar"
- v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
- "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
- ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
- ") 0\n}"
- addDumpTest(v4, "("+v4t+") "+v4s+"\n")
-
- // C uint8_t array.
- v5, v5l, v5c := testdata.GetCgoUint8tArray()
- v5Len := fmt.Sprintf("%d", v5l)
- v5Cap := fmt.Sprintf("%d", v5c)
- v5t := "[6]testdata._Ctype_uint8_t"
- v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
- "{\n 00000000 74 65 73 74 35 00 " +
- " |test5.|\n}"
- addDumpTest(v5, "("+v5t+") "+v5s+"\n")
-
- // C typedefed unsigned char array.
- v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() - v6Len := fmt.Sprintf("%d", v6l) - v6Cap := fmt.Sprintf("%d", v6c) - v6t := "[6]testdata._Ctype_custom_uchar_t" - v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + - "{\n 00000000 74 65 73 74 36 00 " + - " |test6.|\n}" - addDumpTest(v6, "("+v6t+") "+v6s+"\n") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go deleted file mode 100644 index 52a0971f..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when either cgo is not supported or "-tags testcgo" is not added to the go -// test command line. This file intentionally does not setup any cgo tests in -// this scenario. -// +build !cgo !testcgo - -package spew_test - -func addCgoDumpTests() { - // Don't add any tests for cgo since this file is only compiled when - // there should not be any cgo tests. -} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go deleted file mode 100644 index de6c4e30..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/example_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew" -) - -type Flag int - -const ( - flagOne Flag = iota - flagTwo -) - -var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", -} - -func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) -} - -type Bar struct { - data uintptr -} - -type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} -} - -// This example demonstrates how to use Dump to dump variables to stdout. 
-func ExampleDump() { - // The following package level declarations are assumed for this example: - /* - type Flag int - - const ( - flagOne Flag = iota - flagTwo - ) - - var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", - } - - func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) - } - - type Bar struct { - data uintptr - } - - type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} - } - */ - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - f := Flag(5) - b := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - - // Dump! - spew.Dump(s1, f, b) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Flag) Unknown flag (5) - // ([]uint8) (len=34 cap=34) { - // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - // 00000020 31 32 |12| - // } - // -} - -// This example demonstrates how to use Printf to display a variable with a -// format string and inline formatting. -func ExamplePrintf() { - // Create a double pointer to a uint 8. - ui8 := uint8(5) - pui8 := &ui8 - ppui8 := &pui8 - - // Create a circular data type. - type circular struct { - ui8 uint8 - c *circular - } - c := circular{ui8: 1} - c.c = &c - - // Print! - spew.Printf("ppui8: %v\n", ppui8) - spew.Printf("circular: %v\n", c) - - // Output: - // ppui8: <**>5 - // circular: {1 <*>{1 <*>}} -} - -// This example demonstrates how to use a ConfigState. -func ExampleConfigState() { - // Modify the indent level of the ConfigState only. The global - // configuration is not modified. - scs := spew.ConfigState{Indent: "\t"} - - // Output using the ConfigState instance. - v := map[string]int{"one": 1} - scs.Printf("v: %v\n", v) - scs.Dump(v) - - // Output: - // v: map[one:1] - // (map[string]int) (len=1) { - // (string) (len=3) "one": (int) 1 - // } -} - -// This example demonstrates how to use ConfigState.Dump to dump variables to -// stdout -func ExampleConfigState_Dump() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances with different indentation. - scs := spew.ConfigState{Indent: "\t"} - scs2 := spew.ConfigState{Indent: " "} - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - - // Dump using the ConfigState instances. 
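// The deleted ExampleXxx functions above follow Go's testable-example
// convention: "go test" runs each ExampleXxx function and compares what it
// prints to stdout against the trailing "// Output:" comment, so the examples
// double as regression tests for spew's output format. A minimal sketch of
// the mechanism (hypothetical test package, assuming go-spew is importable):
//
//	package mypkg_test
//
//	import "github.com/davecgh/go-spew/spew"
//
//	func Example() {
//		spew.Dump(42)
//		// Output:
//		// (int) 42
//	}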
- scs.Dump(s1) - scs2.Dump(s1) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // -} - -// This example demonstrates how to use ConfigState.Printf to display a variable -// with a format string and inline formatting. -func ExampleConfigState_Printf() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances and modify the method handling of the - // first ConfigState only. - scs := spew.NewDefaultConfig() - scs2 := spew.NewDefaultConfig() - scs.DisableMethods = true - - // Alternatively - // scs := spew.ConfigState{Indent: " ", DisableMethods: true} - // scs2 := spew.ConfigState{Indent: " "} - - // This is of type Flag which implements a Stringer and has raw value 1. - f := flagTwo - - // Dump using the ConfigState instances. - scs.Printf("f: %v\n", f) - scs2.Printf("f: %v\n", f) - - // Output: - // f: 1 - // f: flagTwo -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go deleted file mode 100644 index b664b3f1..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/format_test.go +++ /dev/null @@ -1,1558 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Test Summary: -NOTE: For each test, a nil pointer, a single pointer and double pointer to the -base test element are also tested to ensure proper indirection across all types. 
- -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer interface -- Type that has a custom Error interface -- %x passthrough with uint -- %#x passthrough with uint -- %f passthrough with precision -- %f passthrough with width and precision -- %d passthrough with width -- %q passthrough with string -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// formatterTest is used to describe a test to be perfomed against NewFormatter. -type formatterTest struct { - format string - in interface{} - wants []string -} - -// formatterTests houses all of the tests to be performed against NewFormatter. -var formatterTests = make([]formatterTest, 0) - -// addFormatterTest is a helper method to append the passed input and desired -// result to formatterTests. -func addFormatterTest(format string, in interface{}, wants ...string) { - test := formatterTest{format, in, wants} - formatterTests = append(formatterTests, test) -} - -func addIntFormatterTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max int16. 
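// formatterTest above is a classic table-driven harness: each case records a
// format string, an input value, and one or more acceptable outputs, and the
// variadic "wants" absorbs legitimate nondeterminism such as map iteration
// order (which is why some map cases below register two permutations). A
// condensed sketch of the matching logic (hypothetical names; the real runner
// is TestFormatter near the end of this file):
//
//	// passes reports whether got matches any acceptable output.
//	func passes(got string, wants []string) bool {
//		for _, want := range wants {
//			if got == want {
//				return true
//			}
//		}
//		return false
//	}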
- v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max int64. - v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max int. 
- v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") -} - -func addUintFormatterTests() { - // Max uint8. - v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max uint16. - v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max uint32. 
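// Every group above builds its expected strings at setup time from live
// pointer values: fmt.Sprintf("%p", pv) captures the actual address, so the
// expectations stay valid even though addresses differ on every run. The
// "<*>" and "<**>" markers encode one and two levels of pointer indirection
// in spew's %v output, and %+v adds the captured addresses. A small sketch of
// how one expectation is assembled (values as in the int8 case above):
//
//	v := int8(127)
//	pv := &v
//	vAddr := fmt.Sprintf("%p", pv)   // address of v
//	pvAddr := fmt.Sprintf("%p", &pv) // address of pv
//	// expected %+v rendering of &pv:
//	want := "<**>(" + pvAddr + "->" + vAddr + ")127"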
- v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max uint64. - v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max uint. - v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") -} - -func addBoolFormatterTests() { - // Boolean true. 
- v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Boolean false. - v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFloatFormatterTests() { - // Standard float32. - v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard float64. 
- v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addComplexFormatterTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard complex128. - v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addArrayFormatterTests() { - // Array containing standard ints. 
- v := [3]int{1, 2, 3} - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[3]int" - vs := "[1 2 3]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Array containing type with custom formatter on pointer receiver only. - v2 := [3]pstringer{"1", "2", "3"} - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[3]spew_test.pstringer" - v2sp := "[stringer 1 stringer 2 stringer 3]" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "[1 2 3]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2sp) - addFormatterTest("%v", &pv2, "<**>"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Array containing interfaces. - v3 := [3]interface{}{"one", int(2), uint(3)} - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "[one 2 3]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addSliceFormatterTests() { - // Slice containing standard float32 values. 
- v := []float32{3.14, 6.28, 12.56} - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[]float32" - vs := "[3.14 6.28 12.56]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Slice containing type with custom formatter on pointer receiver only. - v2 := []pstringer{"1", "2", "3"} - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[]spew_test.pstringer" - v2s := "[stringer 1 stringer 2 stringer 3]" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Slice containing interfaces. - v3 := []interface{}{"one", int(2), uint(3), nil} - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "[one 2 3 ]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + - ")]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Nil slice. 
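// The pstringer cases above target a Stringer implemented on the pointer
// receiver only. spew normally uses unsafe pointer tricks to invoke such a
// method even for unexported or non-addressable values, which plain fmt
// cannot do, so the expectations switch on spew.UnsafeDisabled for builds
// where those tricks are unavailable (e.g. under the safe build tag). The
// type itself is defined elsewhere in the deleted test suite; judging by the
// expected outputs, its shape is essentially:
//
//	type pstringer string
//
//	// String has a pointer receiver, so a bare pstringer value does not
//	// itself satisfy fmt.Stringer.
//	func (s *pstringer) String() string {
//		return "stringer " + string(*s)
//	}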
- var v4 []int - nv4 := (*[]int)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]int" - v4s := "" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStringFormatterTests() { - // Standard string. - v := "test" - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "test" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addInterfaceFormatterTests() { - // Nil interface. - var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Sub-interface. 
- v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addMapFormatterTests() { - // Map with string keys and int vals. - v := map[string]int{"one": 1, "two": 2} - nilMap := map[string]int(nil) - nv := (*map[string]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "map[string]int" - vs := "map[one:1 two:2]" - vs2 := "map[two:2 one:1]" - addFormatterTest("%v", v, vs, vs2) - addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, - "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) - addFormatterTest("%#v", nilMap, "("+vt+")"+"") - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, - "(*"+vt+")("+vAddr+")"+vs2) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, - "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%#+v", nilMap, "("+vt+")"+"") - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Map with custom formatter type on pointer receiver only keys and vals. - v2 := map[pstringer]pstringer{"one": "1"} - nv2 := (*map[pstringer]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "map[spew_test.pstringer]spew_test.pstringer" - v2s := "map[stringer one:stringer 1]" - if spew.UnsafeDisabled { - v2s = "map[one:1]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Map with interface keys and values. 
- v3 := map[interface{}]interface{}{"one": 1} - nv3 := (*map[interface{}]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "map[interface {}]interface {}" - v3t1 := "string" - v3t2 := "int" - v3s := "map[one:1]" - v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Map with nil interface value - v4 := map[string]interface{}{"nil": nil} - nv4 := (*map[string]interface{})(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "map[string]interface {}" - v4t1 := "interface {}" - v4s := "map[nil:]" - v4s2 := "map[nil:(" + v4t1 + ")]" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStructFormatterTests() { - // Struct with primitives. - type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{127 255}" - vs2 := "{a:127 b:255}" - vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs3) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs3) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Struct that contains another struct. 
- type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{{127 255} true}" - v2s2 := "{s1:{a:127 b:255} b:true}" - v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + - v2t5 + ")true}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s2) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. - type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{stringer test stringer test2}" - v3sp := v3s - v3s2 := "{s:stringer test S:stringer test2}" - v3s2p := v3s2 - v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" - v3s3p := v3s3 - if spew.UnsafeDisabled { - v3s = "{test test2}" - v3sp = "{test stringer test2}" - v3s2 = "{s:test S:test2}" - v3s2p = "{s:test S:stringer test2}" - v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" - v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" - } - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3sp) - addFormatterTest("%v", &pv3, "<**>"+v3sp) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s2) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Struct that contains embedded struct and field to same struct. 
- e := embed{"embedstr"} - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{<*>{embedstr} <*>{embedstr}}" - v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + - "){a:embedstr}}" - v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + - "){a:(" + v4t3 + ")embedstr}}" - v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + - ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s2) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addUintptrFormatterTests() { - // Null pointer. - v := uintptr(0) - nv := (*uintptr)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addUnsafePointerFormatterTests() { - // Null pointer. 
- v := unsafe.Pointer(uintptr(0)) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. - i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addChanFormatterTests() { - // Nil channel. - var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFuncFormatterTests() { - // Function with no params and no returns. 
- v := addIntFormatterTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Function with param and no returns. - v2 := TestFormatter - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Function with multiple params and multiple returns. - var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addCircularFormatterTests() { - // Struct that is circular through self referencing. 
- type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{<*>{<*>}}" - vs2 := "{<*>}" - vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" - vs4 := "{c:<*>(" + vAddr + ")}" - vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" - vs6 := "{c:(*" + vt + ")}" - vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + - ")}}" - vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs2) - addFormatterTest("%+v", v, vs3) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) - addFormatterTest("%#v", v, "("+vt+")"+vs5) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) - addFormatterTest("%#+v", v, "("+vt+")"+vs7) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) - - // Structs that are circular through cross referencing. - v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{<*>{<*>{<*>}}}" - v2s2 := "{<*>{<*>}}" - v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + - ts2Addr + ")}}}" - v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" - v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + - ")}}}" - v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" - v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + - ")}}}" - v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + ")}}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s2) - addFormatterTest("%v", &pv2, "<**>"+v2s2) - addFormatterTest("%+v", v2, v2s3) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) - - // Structs that are indirectly circular. 
- v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{<*>{<*>{<*>{<*>}}}}" - v3s2 := "{<*>{<*>{<*>}}}" - v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" - v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + ")}}}" - v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - "){ps2:(*" + v3t2 + ")}}}}" - v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - ")}}}" - v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + - ")(" + tic2Addr + ")}}}}" - v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s2) - addFormatterTest("%v", &pv3, "<**>"+v3s2) - addFormatterTest("%+v", v3, v3s3) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) - addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) -} - -func addPanicFormatterTests() { - // Type that panics in its Stringer interface. - v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addErrorFormatterTests() { - // Type that has a custom Error interface. 
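// The circular cases above check that spew detects revisited pointers in
// self-referencing, cross-referencing, and indirectly circular structures and
// stops after showing the cycle once rather than recursing forever; the
// panicer cases check that a panic raised inside a value's String method is
// recovered and reported inline as "(PANIC=...)" instead of crashing the
// dump. A minimal cycle sketch (hypothetical type, assuming go-spew is
// importable):
//
//	type node struct {
//		next *node
//	}
//
//	n := node{}
//	n.next = &n
//	spew.Dump(n) // renders the cycle one level deep, then cuts it off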
- v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "<nil>") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "<nil>") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>") -} - -func addPassthroughFormatterTests() { - // %x passthrough with uint. - v := uint(4294967295) - pv := &v - vAddr := fmt.Sprintf("%x", pv) - pvAddr := fmt.Sprintf("%x", &pv) - vs := "ffffffff" - addFormatterTest("%x", v, vs) - addFormatterTest("%x", pv, vAddr) - addFormatterTest("%x", &pv, pvAddr) - - // %#x passthrough with uint. - v2 := int(2147483647) - pv2 := &v2 - v2Addr := fmt.Sprintf("%#x", pv2) - pv2Addr := fmt.Sprintf("%#x", &pv2) - v2s := "0x7fffffff" - addFormatterTest("%#x", v2, v2s) - addFormatterTest("%#x", pv2, v2Addr) - addFormatterTest("%#x", &pv2, pv2Addr) - - // %f passthrough with precision. - addFormatterTest("%.2f", 3.1415, "3.14") - addFormatterTest("%.3f", 3.1415, "3.142") - addFormatterTest("%.4f", 3.1415, "3.1415") - - // %f passthrough with width and precision. - addFormatterTest("%5.2f", 3.1415, " 3.14") - addFormatterTest("%6.3f", 3.1415, " 3.142") - addFormatterTest("%7.4f", 3.1415, " 3.1415") - - // %d passthrough with width. - addFormatterTest("%3d", 127, "127") - addFormatterTest("%4d", 127, " 127") - addFormatterTest("%5d", 127, " 127") - - // %q passthrough with string. - addFormatterTest("%q", "test", "\"test\"") -} - -// TestFormatter executes all of the tests described by formatterTests. -func TestFormatter(t *testing.T) { - // Setup tests.
- addIntFormatterTests() - addUintFormatterTests() - addBoolFormatterTests() - addFloatFormatterTests() - addComplexFormatterTests() - addArrayFormatterTests() - addSliceFormatterTests() - addStringFormatterTests() - addInterfaceFormatterTests() - addMapFormatterTests() - addStructFormatterTests() - addUintptrFormatterTests() - addUnsafePointerFormatterTests() - addChanFormatterTests() - addFuncFormatterTests() - addCircularFormatterTests() - addPanicFormatterTests() - addErrorFormatterTests() - addPassthroughFormatterTests() - - t.Logf("Running %d tests", len(formatterTests)) - for i, test := range formatterTests { - buf := new(bytes.Buffer) - spew.Fprintf(buf, test.format, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, - stringizeWants(test.wants)) - continue - } - } -} - -type testStruct struct { - x int -} - -func (ts testStruct) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -type testStructP struct { - x int -} - -func (ts *testStructP) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -func TestPrintSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "map[1:1 2:2 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if spew.UnsafeDisabled { - expected = "map[1:1 2:2 3:3]" - } - if s != expected { - t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) - } - - if !spew.UnsafeDisabled { - s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) - } - } - - s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "map[error: 1:1 error: 2:2 error: 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go deleted file mode 100644 index 1069ee21..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/internal_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins <dave@davec.name> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -This test file is part of the spew package rather than the spew_test -package because it needs access to internals to properly test certain cases -which are not possible via the public interface since they should never happen. -*/ - -package spew - -import ( - "bytes" - "reflect" - "testing" -) - -// dummyFmtState implements a fake fmt.State to use for testing invalid -// reflect.Value handling. This is necessary because the fmt package catches -// invalid values before invoking the formatter on them. -type dummyFmtState struct { - bytes.Buffer -} - -func (dfs *dummyFmtState) Flag(f int) bool { - if f == int('+') { - return true - } - return false -} - -func (dfs *dummyFmtState) Precision() (int, bool) { - return 0, false -} - -func (dfs *dummyFmtState) Width() (int, bool) { - return 0, false -} - -// TestInvalidReflectValue ensures the dump and formatter code handles an -// invalid reflect value properly. This needs access to internal state since it -// should never happen in real code and therefore can't be tested via the public -// API. -func TestInvalidReflectValue(t *testing.T) { - i := 1 - - // Dump invalid reflect value. - v := new(reflect.Value) - buf := new(bytes.Buffer) - d := dumpState{w: buf, cs: &Config} - d.dump(*v) - s := buf.String() - want := "<invalid>" - if s != want { - t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) - } - i++ - - // Formatter invalid reflect value. - buf2 := new(dummyFmtState) - f := formatState{value: *v, cs: &Config, fs: buf2} - f.format(*v) - s = buf2.String() - want = "<invalid>" - if s != want { - t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) - } -} - -// SortValues makes the internal sortValues function available to the test -// package. -func SortValues(values []reflect.Value, cs *ConfigState) { - sortValues(values, cs) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go deleted file mode 100644 index 83e070e9..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2013-2015 Dave Collins <dave@davec.name> - -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. - -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line.
-// +build !appengine,!disableunsafe - -/* -This test file is part of the spew package rather than the spew_test -package because it needs access to internals to properly test certain cases -which are not possible via the public interface since they should never happen. -*/ - -package spew - -import ( - "bytes" - "reflect" - "testing" - "unsafe" -) - -// changeKind uses unsafe to intentionally change the kind of a reflect.Value to -// the maximum kind value which does not exist. This is needed to test the -// fallback code which punts to the standard fmt library for new types that -// might get added to the language. -func changeKind(v *reflect.Value, readOnly bool) { - rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) - *rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift) - if readOnly { - *rvf |= flagRO - } else { - *rvf &= ^uintptr(flagRO) - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go deleted file mode 100644 --- a/vendor/github.com/davecgh/go-spew/spew/spew_test.go +++ /dev/null -/* - * Copyright (c) 2013 Dave Collins <dave@davec.name> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// spewFunc is used to identify which public function of the spew package or -// ConfigState a test applies to. -type spewFunc int - -const ( - fCSFdump spewFunc = iota - fCSFprint - fCSFprintf - fCSFprintln - fCSPrint - fCSPrintln - fCSSdump - fCSSprint - fCSSprintf - fCSSprintln - fCSErrorf - fCSNewFormatter - fErrorf - fFprint - fFprintln - fPrint - fPrintln - fSdump - fSprint - fSprintf - fSprintln -) - -// Map of spewFunc values to names for pretty printing. -var spewFuncStrings = map[spewFunc]string{ - fCSFdump: "ConfigState.Fdump", - fCSFprint: "ConfigState.Fprint", - fCSFprintf: "ConfigState.Fprintf", - fCSFprintln: "ConfigState.Fprintln", - fCSSdump: "ConfigState.Sdump", - fCSPrint: "ConfigState.Print", - fCSPrintln: "ConfigState.Println", - fCSSprint: "ConfigState.Sprint", - fCSSprintf: "ConfigState.Sprintf", - fCSSprintln: "ConfigState.Sprintln", - fCSErrorf: "ConfigState.Errorf", - fCSNewFormatter: "ConfigState.NewFormatter", - fErrorf: "spew.Errorf", - fFprint: "spew.Fprint", - fFprintln: "spew.Fprintln", - fPrint: "spew.Print", - fPrintln: "spew.Println", - fSdump: "spew.Sdump", - fSprint: "spew.Sprint", - fSprintf: "spew.Sprintf", - fSprintln: "spew.Sprintln", -} - -func (f spewFunc) String() string { - if s, ok := spewFuncStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) -} - -// spewTest is used to describe a test to be performed against the public -// functions of the spew package or ConfigState. -type spewTest struct { - cs *spew.ConfigState - f spewFunc - format string - in interface{} - want string -} - -// spewTests houses the tests to be performed against the public functions of -// the spew package and ConfigState. -// -// These tests are only intended to ensure the public functions are exercised -// and are intentionally not exhaustive of types.
The exhaustive type -// tests are handled in the dump and format tests. -var spewTests []spewTest - -// redirStdout is a helper function to return the standard output from f as a -// byte slice. -func redirStdout(f func()) ([]byte, error) { - tempFile, err := ioutil.TempFile("", "ss-test") - if err != nil { - return nil, err - } - fileName := tempFile.Name() - defer os.Remove(fileName) // Ignore error - - origStdout := os.Stdout - os.Stdout = tempFile - f() - os.Stdout = origStdout - tempFile.Close() - - return ioutil.ReadFile(fileName) -} - -func initSpewTests() { - // Config states with various settings. - scsDefault := spew.NewDefaultConfig() - scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} - scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} - scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} - scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} - - // Variables for tests on types which implement Stringer interface with and - // without a pointer receiver. - ts := stringer("test") - tps := pstringer("test") - - // depthTester is used to test max depth handling for structs, arrays, slices - // and maps. - type depthTester struct { - ic indirCir1 - arr [1]string - slice []string - m map[string]int - } - dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, - map[string]int{"one": 1}} - - // Variable for tests on types which implement error interface. - te := customError(10) - - spewTests = []spewTest{ - {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, - {scsDefault, fCSFprint, "", int16(32767), "32767"}, - {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, - {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, - {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, - {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, - {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, - {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, - {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, - {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, - {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, - {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, - {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, - {scsDefault, fFprint, "", float32(3.14), "3.14"}, - {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, - {scsDefault, fPrint, "", true, "true"}, - {scsDefault, fPrintln, "", false, "false\n"}, - {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, - {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, - {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, - {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, - {scsNoMethods, fCSFprint, "", ts, "test"}, - {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, - {scsNoMethods, fCSFprint, "", tps, "test"}, - {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, - {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, - {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, - {scsNoPmethods, fCSFprint, "", tps, "test"}, - {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, - {scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"}, - {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + - " ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" + - " arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" + - " slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" + - " m: (map[string]int) (len=1) {\n <max depth reached>
\n }\n}\n"}, - {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, - {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + - "(len=4) (stringer test) \"test\"\n"}, - {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, - {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + - "(error: 10) 10\n"}, - } -} - -// TestSpew executes all of the tests described by spewTests. -func TestSpew(t *testing.T) { - initSpewTests() - - t.Logf("Running %d tests", len(spewTests)) - for i, test := range spewTests { - buf := new(bytes.Buffer) - switch test.f { - case fCSFdump: - test.cs.Fdump(buf, test.in) - - case fCSFprint: - test.cs.Fprint(buf, test.in) - - case fCSFprintf: - test.cs.Fprintf(buf, test.format, test.in) - - case fCSFprintln: - test.cs.Fprintln(buf, test.in) - - case fCSPrint: - b, err := redirStdout(func() { test.cs.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSPrintln: - b, err := redirStdout(func() { test.cs.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSSdump: - str := test.cs.Sdump(test.in) - buf.WriteString(str) - - case fCSSprint: - str := test.cs.Sprint(test.in) - buf.WriteString(str) - - case fCSSprintf: - str := test.cs.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fCSSprintln: - str := test.cs.Sprintln(test.in) - buf.WriteString(str) - - case fCSErrorf: - err := test.cs.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fCSNewFormatter: - fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) - - case fErrorf: - err := spew.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fFprint: - spew.Fprint(buf, test.in) - - case fFprintln: - spew.Fprintln(buf, test.in) - - case fPrint: - b, err := redirStdout(func() { spew.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fPrintln: - b, err := redirStdout(func() { spew.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fSdump: - str := spew.Sdump(test.in) - buf.WriteString(str) - - case fSprint: - str := spew.Sprint(test.in) - buf.WriteString(str) - - case fSprintf: - str := spew.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fSprintln: - str := spew.Sprintln(test.in) - buf.WriteString(str) - - default: - t.Errorf("%v #%d unrecognized function", test.f, i) - continue - } - s := buf.String() - if test.want != s { - t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) - continue - } - } -} diff --git a/vendor/github.com/docker/containerd/.gitignore b/vendor/github.com/docker/containerd/.gitignore new file mode 100644 index 00000000..4109a6d5 --- /dev/null +++ b/vendor/github.com/docker/containerd/.gitignore @@ -0,0 +1,7 @@ +containerd/containerd +containerd-shim/containerd-shim +bin/ +ctr/ctr +hack/benchmark +*.exe +integration-test/test-artifacts diff --git a/vendor/github.com/docker/containerd/CONTRIBUTING.md b/vendor/github.com/docker/containerd/CONTRIBUTING.md new file mode 100644 index 00000000..b8a512c3 --- /dev/null +++ b/vendor/github.com/docker/containerd/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. 
The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/containerd/Dockerfile b/vendor/github.com/docker/containerd/Dockerfile new file mode 100644 index 00000000..23cf54b2 --- /dev/null +++ b/vendor/github.com/docker/containerd/Dockerfile @@ -0,0 +1,51 @@ +FROM debian:jessie + +RUN apt-get update && apt-get install -y \ + build-essential \ + ca-certificates \ + curl \ + git \ + make \ + jq \ + apparmor \ + libapparmor-dev \ + --no-install-recommends \ + && rm -rf /var/lib/apt/lists/* + +# Install Go +ENV GO_VERSION 1.5.3 +RUN curl -sSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar -v -C /usr/local -xz +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/containerd/vendor + +WORKDIR /go/src/github.com/docker/containerd + +# install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.0 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install runc +ENV RUNC_COMMIT d49ece5a83da3dcb820121d6850e2b61bd0a5fbe +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ + && cd "$GOPATH/src/github.com/opencontainers/runc" \ + && git checkout -q "$RUNC_COMMIT" \ + && make BUILDTAGS="seccomp apparmor selinux" && make install + +COPY .
/go/src/github.com/docker/containerd + +WORKDIR /go/src/github.com/docker/containerd + +RUN make all install diff --git a/vendor/github.com/docker/containerd/LICENSE.code b/vendor/github.com/docker/containerd/LICENSE.code new file mode 100644 index 00000000..8f3fee62 --- /dev/null +++ b/vendor/github.com/docker/containerd/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/containerd/LICENSE.docs b/vendor/github.com/docker/containerd/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/vendor/github.com/docker/containerd/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/containerd/MAINTAINERS b/vendor/github.com/docker/containerd/MAINTAINERS new file mode 100644 index 00000000..77c5fa7b --- /dev/null +++ b/vendor/github.com/docker/containerd/MAINTAINERS @@ -0,0 +1,41 @@ +# Containerd maintainers file +# +# This file describes who runs the docker/containerd project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "crosbymichael", + "tonistiigi", + "mlaventure", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
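The MAINTAINERS file being added here describes itself as consumable "by both humans and programs" via any TOML-compliant parser. As a brief illustration (a sketch, not part of the diff), this is one way to pull the `[people]` table out of such a file in Go; the `BurntSushi/toml` dependency and the struct shapes are assumptions, since any TOML decoder with struct support would do:

```
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml" // assumed parser; any TOML-compliant decoder works
)

// person mirrors the Name/Email/GitHub keys in each [people.<key>] table.
type person struct {
	Name   string
	Email  string
	GitHub string
}

// maintainers decodes just the [people] section; other tables such as [Org]
// are simply ignored by the decoder.
type maintainers struct {
	People map[string]person `toml:"people"`
}

func main() {
	var m maintainers
	if _, err := toml.DecodeFile("MAINTAINERS", &m); err != nil {
		log.Fatal(err)
	}
	for key, p := range m.People {
		fmt.Printf("%s: %s <%s> (GitHub: %s)\n", key, p.Name, p.Email, p.GitHub)
	}
}
```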
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.mlaventure] + Name = "Kenfe-Mickaël Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + diff --git a/vendor/github.com/docker/containerd/Makefile b/vendor/github.com/docker/containerd/Makefile new file mode 100644 index 00000000..fdd38fcd --- /dev/null +++ b/vendor/github.com/docker/containerd/Makefile @@ -0,0 +1,102 @@ +BUILDTAGS= + +PROJECT=github.com/docker/containerd + +GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true) +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2> /dev/null) + +LDFLAGS := -X github.com/docker/containerd.GitCommit=${GIT_COMMIT} ${LDFLAGS} + +TEST_TIMEOUT ?= 5m +TEST_SUITE_TIMEOUT ?= 10m + +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through. +INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +TESTBENCH_ARTIFACTS_DIR := output/test-artifacts +TESTBENCH_BUNDLE_DIR := $(TESTBENCH_ARTIFACTS_DIR)/archives + +DOCKER_IMAGE := containerd-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_RUN := docker run --privileged --rm -i $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + + +export GOPATH:=$(CURDIR)/vendor:$(GOPATH) + +all: client daemon shim + +static: client-static daemon-static shim-static + +bin: + mkdir -p bin/ + +clean: + rm -rf bin && rm -rf output + +client: bin + cd ctr && go build -ldflags "${LDFLAGS}" -o ../bin/ctr + +client-static: + cd ctr && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/ctr + +daemon: bin + cd containerd && go build -ldflags "${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd + +daemon-static: + cd containerd && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd + +shim: bin + cd containerd-shim && go build -tags "$(BUILDTAGS)" -ldflags "-w" -o ../bin/containerd-shim + +shim-static: + cd containerd-shim && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd-shim + +$(TESTBENCH_BUNDLE_DIR)/busybox.tar: + mkdir -p $(TESTBENCH_BUNDLE_DIR) + curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.11/rootfs.tar' -o $(TESTBENCH_BUNDLE_DIR)/busybox.tar + +bundles-rootfs: $(TESTBENCH_BUNDLE_DIR)/busybox.tar + +dbuild: $(TESTBENCH_BUNDLE_DIR)/busybox.tar + @docker build --rm --force-rm -t "$(DOCKER_IMAGE)" . + +dtest: dbuild + $(DOCKER_RUN) make test + +dbench: dbuild + $(DOCKER_RUN) make bench + +install: + cp bin/* /usr/local/bin/ + +protoc: + protoc -I ./api/grpc/types ./api/grpc/types/api.proto --go_out=plugins=grpc:api/grpc/types + +fmt: + @gofmt -s -l . | grep -v vendor | grep -v .pb. | tee /dev/stderr + +lint: + @golint ./... | grep -v vendor | grep -v .pb. | tee /dev/stderr + +shell: dbuild + $(DOCKER_RUN) bash + +test: validate install bundles-rootfs + go test -bench=. -v $(shell go list ./... 
| grep -v /vendor | grep -v /integration-test) +ifneq ($(wildcard /.dockerenv), ) + cd integration-test ; \ + go test -check.v -check.timeout=$(TEST_TIMEOUT) -timeout=$(TEST_SUITE_TIMEOUT) $(TESTFLAGS) github.com/docker/containerd/integration-test && \ + go test -containerd.shim="" -check.v -check.timeout=$(TEST_TIMEOUT) -timeout=$(TEST_SUITE_TIMEOUT) $(TESTFLAGS) github.com/docker/containerd/integration-test +endif + +bench: shim validate install bundles-rootfs + go test -bench=. -v $(shell go list ./... | grep -v /vendor | grep -v /integration-test) + +validate: fmt + +uninstall: + $(foreach file,containerd containerd-shim ctr,rm /usr/local/bin/$(file);) diff --git a/vendor/github.com/docker/containerd/NOTICE b/vendor/github.com/docker/containerd/NOTICE new file mode 100644 index 00000000..8915f027 --- /dev/null +++ b/vendor/github.com/docker/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/containerd/README.md b/vendor/github.com/docker/containerd/README.md new file mode 100644 index 00000000..6413bdc4 --- /dev/null +++ b/vendor/github.com/docker/containerd/README.md @@ -0,0 +1,72 @@ +# containerd + +containerd is a daemon to control runC, built for performance and density. +containerd leverages runC's advanced features such as seccomp and user namespace support as well +as checkpoint and restore for cloning and live migration of containers. + +## Getting started + +The easiest way to start using containerd is to download binaries from the [releases page](https://github.com/docker/containerd/releases). + +The included `ctr` command-line tool allows you to interact with the containerd daemon: + +``` +$ sudo ctr containers start redis /containers/redis +$ sudo ctr containers list +ID PATH STATUS PROCESSES +redis /containers/redis running 14063 +``` + +`/containers/redis` is the path to an OCI bundle. [See the docs for more information.](docs/bundle.md) + +## Docs + + * [Client CLI reference (`ctr`)](docs/cli.md) + * [Daemon CLI reference (`containerd`)](docs/daemon.md) + * [Creating OCI bundles](docs/bundle.md) + * [containerd changes to the bundle](docs/bundle-changes.md) + * [Attaching to STDIO or TTY](docs/attach.md) + * [Telemetry and metrics](docs/telemetry.md) + +All documentation is contained in the `/docs` directory in this repository. + +## Building + +You will need to make sure that you have Go installed on your system and the containerd repository is cloned +in your `$GOPATH`. You will also need to make sure that you have all the dependencies cloned as well. +Currently, contributing to containerd is not for first-time devs, as many dependencies are not vendored and +work is being completed at a high rate. + +After that, just run `make` and the binaries for the daemon and client will be located in the `bin/` directory. + +## Performance + +Starting 1000 containers concurrently runs at 126-140 containers per second.
+
+Overall start times:
+
+```
+[containerd] 2015/12/04 15:00:54 count: 1000
+[containerd] 2015/12/04 14:59:54 min: 23ms
+[containerd] 2015/12/04 14:59:54 max: 355ms
+[containerd] 2015/12/04 14:59:54 mean: 78ms
+[containerd] 2015/12/04 14:59:54 stddev: 34ms
+[containerd] 2015/12/04 14:59:54 median: 73ms
+[containerd] 2015/12/04 14:59:54 75%: 91ms
+[containerd] 2015/12/04 14:59:54 95%: 123ms
+[containerd] 2015/12/04 14:59:54 99%: 287ms
+[containerd] 2015/12/04 14:59:54 99.9%: 355ms
+```
+
+## Roadmap
+
+The current roadmap and milestones for alpha and beta completion are in the GitHub issues on this repository. Please refer to these issues for what is being worked on and completed for the various stages of development.
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code
+is released under the Apache 2.0 license. The README.md file, and files in the
+"docs" folder are licensed under the Creative Commons Attribution 4.0
+International License under the terms and conditions set forth in the file
+"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
+CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server.go b/vendor/github.com/docker/containerd/api/grpc/server/server.go
new file mode 100644
index 00000000..55eb7137
--- /dev/null
+++ b/vendor/github.com/docker/containerd/api/grpc/server/server.go
@@ -0,0 +1,468 @@
+package server
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+
+	"github.com/docker/containerd"
+	"github.com/docker/containerd/api/grpc/types"
+	"github.com/docker/containerd/runtime"
+	"github.com/docker/containerd/supervisor"
+	"golang.org/x/net/context"
+)
+
+type apiServer struct {
+	sv *supervisor.Supervisor
+}
+
+// NewServer returns a grpc server instance
+func NewServer(sv *supervisor.Supervisor) types.APIServer {
+	return &apiServer{
+		sv: sv,
+	}
+}
+
+func (s *apiServer) GetServerVersion(ctx context.Context, c *types.GetServerVersionRequest) (*types.GetServerVersionResponse, error) {
+	return &types.GetServerVersionResponse{
+		Major:    containerd.VersionMajor,
+		Minor:    containerd.VersionMinor,
+		Patch:    containerd.VersionPatch,
+		Revision: containerd.GitCommit,
+	}, nil
+}
+
+func (s *apiServer) CreateContainer(ctx context.Context, c *types.CreateContainerRequest) (*types.CreateContainerResponse, error) {
+	if c.BundlePath == "" {
+		return nil, errors.New("empty bundle path")
+	}
+	e := &supervisor.StartTask{}
+	e.ID = c.Id
+	e.BundlePath = c.BundlePath
+	e.Stdin = c.Stdin
+	e.Stdout = c.Stdout
+	e.Stderr = c.Stderr
+	e.Labels = c.Labels
+	e.NoPivotRoot = c.NoPivotRoot
+	e.Runtime = c.Runtime
+	e.RuntimeArgs = c.RuntimeArgs
+	e.StartResponse = make(chan supervisor.StartResponse, 1)
+	if c.Checkpoint != "" {
+		e.CheckpointDir = c.CheckpointDir
+		e.Checkpoint = &runtime.Checkpoint{
+			Name: c.Checkpoint,
+		}
+	}
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	r := <-e.StartResponse
+	apiC, err := createAPIContainer(r.Container, false)
+	if err != nil {
+		return nil, err
+	}
+	return &types.CreateContainerResponse{
+		Container: apiC,
+	}, nil
+}
+
+func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpointRequest) (*types.CreateCheckpointResponse, error) {
+	e := &supervisor.CreateCheckpointTask{}
+	e.ID = r.Id
+	e.CheckpointDir = r.CheckpointDir
+	e.Checkpoint = 
&runtime.Checkpoint{ + Name: r.Checkpoint.Name, + Exit: r.Checkpoint.Exit, + Tcp: r.Checkpoint.Tcp, + UnixSockets: r.Checkpoint.UnixSockets, + Shell: r.Checkpoint.Shell, + } + + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.CreateCheckpointResponse{}, nil +} + +func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpointRequest) (*types.DeleteCheckpointResponse, error) { + if r.Name == "" { + return nil, errors.New("checkpoint name cannot be empty") + } + e := &supervisor.DeleteCheckpointTask{} + e.ID = r.Id + e.CheckpointDir = r.CheckpointDir + e.Checkpoint = &runtime.Checkpoint{ + Name: r.Name, + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.DeleteCheckpointResponse{}, nil +} + +func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointRequest) (*types.ListCheckpointResponse, error) { + e := &supervisor.GetContainersTask{} + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + var container runtime.Container + for _, c := range e.Containers { + if c.ID() == r.Id { + container = c + break + } + } + if container == nil { + return nil, grpc.Errorf(codes.NotFound, "no such containers") + } + var out []*types.Checkpoint + checkpoints, err := container.Checkpoints(r.CheckpointDir) + if err != nil { + return nil, err + } + for _, c := range checkpoints { + out = append(out, &types.Checkpoint{ + Name: c.Name, + Tcp: c.Tcp, + Shell: c.Shell, + UnixSockets: c.UnixSockets, + // TODO: figure out timestamp + //Timestamp: c.Timestamp, + }) + } + return &types.ListCheckpointResponse{Checkpoints: out}, nil +} + +func (s *apiServer) Signal(ctx context.Context, r *types.SignalRequest) (*types.SignalResponse, error) { + e := &supervisor.SignalTask{} + e.ID = r.Id + e.PID = r.Pid + e.Signal = syscall.Signal(int(r.Signal)) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.SignalResponse{}, nil +} + +func (s *apiServer) State(ctx context.Context, r *types.StateRequest) (*types.StateResponse, error) { + e := &supervisor.GetContainersTask{} + e.ID = r.Id + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + m := s.sv.Machine() + state := &types.StateResponse{ + Machine: &types.Machine{ + Cpus: uint32(m.Cpus), + Memory: uint64(m.Memory), + }, + } + for _, c := range e.Containers { + apiC, err := createAPIContainer(c, true) + if err != nil { + return nil, err + } + state.Containers = append(state.Containers, apiC) + } + return state, nil +} + +func createAPIContainer(c runtime.Container, getPids bool) (*types.Container, error) { + processes, err := c.Processes() + if err != nil { + return nil, grpc.Errorf(codes.Internal, "get processes for container: "+err.Error()) + } + var procs []*types.Process + for _, p := range processes { + oldProc := p.Spec() + stdio := p.Stdio() + proc := &types.Process{ + Pid: p.ID(), + SystemPid: uint32(p.SystemPid()), + Terminal: oldProc.Terminal, + Args: oldProc.Args, + Env: oldProc.Env, + Cwd: oldProc.Cwd, + Stdin: stdio.Stdin, + Stdout: stdio.Stdout, + Stderr: stdio.Stderr, + } + proc.User = &types.User{ + Uid: oldProc.User.UID, + Gid: oldProc.User.GID, + AdditionalGids: oldProc.User.AdditionalGids, + } + proc.Capabilities = oldProc.Capabilities + proc.ApparmorProfile = oldProc.ApparmorProfile + proc.SelinuxLabel = oldProc.SelinuxLabel + proc.NoNewPrivileges = oldProc.NoNewPrivileges + for _, rl := range oldProc.Rlimits { + proc.Rlimits 
= append(proc.Rlimits, &types.Rlimit{ + Type: rl.Type, + Soft: rl.Soft, + Hard: rl.Hard, + }) + } + procs = append(procs, proc) + } + var pids []int + state := c.State() + if getPids && (state == runtime.Running || state == runtime.Paused) { + if pids, err = c.Pids(); err != nil { + return nil, grpc.Errorf(codes.Internal, "get all pids for container: "+err.Error()) + } + } + return &types.Container{ + Id: c.ID(), + BundlePath: c.Path(), + Processes: procs, + Labels: c.Labels(), + Status: string(state), + Pids: toUint32(pids), + Runtime: c.Runtime(), + }, nil +} + +func toUint32(its []int) []uint32 { + o := []uint32{} + for _, i := range its { + o = append(o, uint32(i)) + } + return o +} + +func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) { + e := &supervisor.UpdateTask{} + e.ID = r.Id + e.State = runtime.State(r.Status) + if r.Resources != nil { + rs := r.Resources + e.Resources = &runtime.Resource{} + if rs.CpuShares != 0 { + e.Resources.CPUShares = int64(rs.CpuShares) + } + if rs.BlkioWeight != 0 { + e.Resources.BlkioWeight = uint16(rs.BlkioWeight) + } + if rs.CpuPeriod != 0 { + e.Resources.CPUPeriod = int64(rs.CpuPeriod) + } + if rs.CpuQuota != 0 { + e.Resources.CPUQuota = int64(rs.CpuQuota) + } + if rs.CpusetCpus != "" { + e.Resources.CpusetCpus = rs.CpusetCpus + } + if rs.CpusetMems != "" { + e.Resources.CpusetMems = rs.CpusetMems + } + if rs.KernelMemoryLimit != 0 { + e.Resources.KernelMemory = int64(rs.KernelMemoryLimit) + } + if rs.MemoryLimit != 0 { + e.Resources.Memory = int64(rs.MemoryLimit) + } + if rs.MemoryReservation != 0 { + e.Resources.MemoryReservation = int64(rs.MemoryReservation) + } + if rs.MemorySwap != 0 { + e.Resources.MemorySwap = int64(rs.MemorySwap) + } + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.UpdateContainerResponse{}, nil +} + +func (s *apiServer) UpdateProcess(ctx context.Context, r *types.UpdateProcessRequest) (*types.UpdateProcessResponse, error) { + e := &supervisor.UpdateProcessTask{} + e.ID = r.Id + e.PID = r.Pid + e.Height = int(r.Height) + e.Width = int(r.Width) + e.CloseStdin = r.CloseStdin + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.UpdateProcessResponse{}, nil +} + +func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error { + t := time.Time{} + if r.Timestamp != 0 { + t = time.Unix(int64(r.Timestamp), 0) + } + events := s.sv.Events(t) + defer s.sv.Unsubscribe(events) + for e := range events { + if err := stream.Send(&types.Event{ + Id: e.ID, + Type: e.Type, + Timestamp: uint64(e.Timestamp.Unix()), + Pid: e.PID, + Status: uint32(e.Status), + }); err != nil { + return err + } + } + return nil +} + +func convertToPb(st *runtime.Stat) *types.StatsResponse { + pbSt := &types.StatsResponse{ + Timestamp: uint64(st.Timestamp.Unix()), + CgroupStats: &types.CgroupStats{}, + } + systemUsage, _ := getSystemCPUUsage() + pbSt.CgroupStats.CpuStats = &types.CpuStats{ + CpuUsage: &types.CpuUsage{ + TotalUsage: st.Cpu.Usage.Total, + PercpuUsage: st.Cpu.Usage.Percpu, + UsageInKernelmode: st.Cpu.Usage.Kernel, + UsageInUsermode: st.Cpu.Usage.User, + }, + ThrottlingData: &types.ThrottlingData{ + Periods: st.Cpu.Throttling.Periods, + ThrottledPeriods: st.Cpu.Throttling.ThrottledPeriods, + ThrottledTime: st.Cpu.Throttling.ThrottledTime, + }, + SystemUsage: systemUsage, + } + pbSt.CgroupStats.MemoryStats = &types.MemoryStats{ + Cache: st.Memory.Cache, + 
Usage: &types.MemoryData{ + Usage: st.Memory.Usage.Usage, + MaxUsage: st.Memory.Usage.Max, + Failcnt: st.Memory.Usage.Failcnt, + Limit: st.Memory.Usage.Limit, + }, + SwapUsage: &types.MemoryData{ + Usage: st.Memory.Swap.Usage, + MaxUsage: st.Memory.Swap.Max, + Failcnt: st.Memory.Swap.Failcnt, + Limit: st.Memory.Swap.Limit, + }, + KernelUsage: &types.MemoryData{ + Usage: st.Memory.Kernel.Usage, + MaxUsage: st.Memory.Kernel.Max, + Failcnt: st.Memory.Kernel.Failcnt, + Limit: st.Memory.Kernel.Limit, + }, + Stats: st.Memory.Raw, + } + pbSt.CgroupStats.BlkioStats = &types.BlkioStats{ + IoServiceBytesRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceBytesRecursive), + IoServicedRecursive: convertBlkioEntryToPb(st.Blkio.IoServicedRecursive), + IoQueuedRecursive: convertBlkioEntryToPb(st.Blkio.IoQueuedRecursive), + IoServiceTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceTimeRecursive), + IoWaitTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoWaitTimeRecursive), + IoMergedRecursive: convertBlkioEntryToPb(st.Blkio.IoMergedRecursive), + IoTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoTimeRecursive), + SectorsRecursive: convertBlkioEntryToPb(st.Blkio.SectorsRecursive), + } + pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats) + for k, st := range st.Hugetlb { + pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{ + Usage: st.Usage, + MaxUsage: st.Max, + Failcnt: st.Failcnt, + } + } + pbSt.CgroupStats.PidsStats = &types.PidsStats{ + Current: st.Pids.Current, + Limit: st.Pids.Limit, + } + return pbSt +} + +func convertBlkioEntryToPb(b []runtime.BlkioEntry) []*types.BlkioStatsEntry { + var pbEs []*types.BlkioStatsEntry + for _, e := range b { + pbEs = append(pbEs, &types.BlkioStatsEntry{ + Major: e.Major, + Minor: e.Minor, + Op: e.Op, + Value: e.Value, + }) + } + return pbEs +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. 
+func getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + bufReader := bufio.NewReaderSize(nil, 128) + defer func() { + bufReader.Reset(nil) + f.Close() + }() + bufReader.Reset(f) + err = nil + for err == nil { + line, err = bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("bad format of cpu stats") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("error parsing cpu stats") + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("bad stats format") +} + +func (s *apiServer) Stats(ctx context.Context, r *types.StatsRequest) (*types.StatsResponse, error) { + e := &supervisor.StatsTask{} + e.ID = r.Id + e.Stat = make(chan *runtime.Stat, 1) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + stats := <-e.Stat + t := convertToPb(stats) + return t, nil +} diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go b/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go new file mode 100644 index 00000000..1051f1f0 --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go @@ -0,0 +1,59 @@ +package server + +import ( + "fmt" + + "github.com/docker/containerd/api/grpc/types" + "github.com/docker/containerd/specs" + "github.com/docker/containerd/supervisor" + "github.com/opencontainers/runc/libcontainer/system" + ocs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +var clockTicksPerSecond = uint64(system.GetClockTicks()) + +func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) { + process := &specs.ProcessSpec{ + Terminal: r.Terminal, + Args: r.Args, + Env: r.Env, + Cwd: r.Cwd, + } + process.User = ocs.User{ + UID: r.User.Uid, + GID: r.User.Gid, + AdditionalGids: r.User.AdditionalGids, + } + process.Capabilities = r.Capabilities + process.ApparmorProfile = r.ApparmorProfile + process.SelinuxLabel = r.SelinuxLabel + process.NoNewPrivileges = r.NoNewPrivileges + for _, rl := range r.Rlimits { + process.Rlimits = append(process.Rlimits, ocs.Rlimit{ + Type: rl.Type, + Soft: rl.Soft, + Hard: rl.Hard, + }) + } + if r.Id == "" { + return nil, fmt.Errorf("container id cannot be empty") + } + if r.Pid == "" { + return nil, fmt.Errorf("process id cannot be empty") + } + e := &supervisor.AddProcessTask{} + e.ID = r.Id + e.PID = r.Pid + e.ProcessSpec = process + e.Stdin = r.Stdin + e.Stdout = r.Stdout + e.Stderr = r.Stderr + e.StartResponse = make(chan supervisor.StartResponse, 1) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + <-e.StartResponse + return &types.AddProcessResponse{}, nil +} diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go b/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go new file mode 100644 index 00000000..bf64b9a2 --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go @@ -0,0 +1,14 @@ +package server + +import ( + "errors" + + "github.com/docker/containerd/api/grpc/types" + "golang.org/x/net/context" +) + +var clockTicksPerSecond uint64 + +func (s *apiServer) AddProcess(ctx context.Context, r 
*types.AddProcessRequest) (*types.AddProcessResponse, error) { + return &types.AddProcessResponse{}, errors.New("apiServer AddProcess() not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go b/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go new file mode 100644 index 00000000..52c6dcbe --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go @@ -0,0 +1,1438 @@ +// Code generated by protoc-gen-go. +// source: api.proto +// DO NOT EDIT! + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + api.proto + +It has these top-level messages: + GetServerVersionRequest + GetServerVersionResponse + UpdateProcessRequest + UpdateProcessResponse + CreateContainerRequest + CreateContainerResponse + SignalRequest + SignalResponse + AddProcessRequest + Rlimit + User + AddProcessResponse + CreateCheckpointRequest + CreateCheckpointResponse + DeleteCheckpointRequest + DeleteCheckpointResponse + ListCheckpointRequest + Checkpoint + ListCheckpointResponse + StateRequest + ContainerState + Process + Container + Machine + StateResponse + UpdateContainerRequest + UpdateResource + UpdateContainerResponse + EventsRequest + Event + NetworkStats + CpuUsage + ThrottlingData + CpuStats + PidsStats + MemoryData + MemoryStats + BlkioStatsEntry + BlkioStats + HugetlbStats + CgroupStats + StatsResponse + StatsRequest +*/ +package types + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +type GetServerVersionRequest struct { +} + +func (m *GetServerVersionRequest) Reset() { *m = GetServerVersionRequest{} } +func (m *GetServerVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionRequest) ProtoMessage() {} +func (*GetServerVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type GetServerVersionResponse struct { + Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + Revision string `protobuf:"bytes,4,opt,name=revision" json:"revision,omitempty"` +} + +func (m *GetServerVersionResponse) Reset() { *m = GetServerVersionResponse{} } +func (m *GetServerVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionResponse) ProtoMessage() {} +func (*GetServerVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type UpdateProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + CloseStdin bool `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"` + Width uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"` + Height uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"` +} + +func (m *UpdateProcessRequest) Reset() { *m = UpdateProcessRequest{} } +func (m *UpdateProcessRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessRequest) ProtoMessage() {} +func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type UpdateProcessResponse struct { +} + +func (m *UpdateProcessResponse) Reset() { *m = UpdateProcessResponse{} } +func (m *UpdateProcessResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessResponse) ProtoMessage() {} +func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type CreateContainerRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` + Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` + NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` + Runtime string `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"` + RuntimeArgs []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"` + CheckpointDir string `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } +func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateContainerRequest) ProtoMessage() {} +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type CreateContainerResponse struct { + Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"` 
+} + +func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } +func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*CreateContainerResponse) ProtoMessage() {} +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateContainerResponse) GetContainer() *Container { + if m != nil { + return m.Container + } + return nil +} + +type SignalRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` +} + +func (m *SignalRequest) Reset() { *m = SignalRequest{} } +func (m *SignalRequest) String() string { return proto.CompactTextString(m) } +func (*SignalRequest) ProtoMessage() {} +func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type SignalResponse struct { +} + +func (m *SignalResponse) Reset() { *m = SignalResponse{} } +func (m *SignalResponse) String() string { return proto.CompactTextString(m) } +func (*SignalResponse) ProtoMessage() {} +func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type AddProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *AddProcessRequest) Reset() { *m = AddProcessRequest{} } +func (m *AddProcessRequest) String() string { return proto.CompactTextString(m) } +func (*AddProcessRequest) ProtoMessage() {} +func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *AddProcessRequest) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *AddProcessRequest) GetRlimits() []*Rlimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type Rlimit struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"` + Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"` +} + +func (m *Rlimit) Reset() { *m = Rlimit{} } +func (m *Rlimit) String() string { return proto.CompactTextString(m) } +func (*Rlimit) ProtoMessage() {} +func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type User struct { + Uid uint32 
`protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` + AdditionalGids []uint32 `protobuf:"varint,3,rep,name=additionalGids" json:"additionalGids,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type AddProcessResponse struct { +} + +func (m *AddProcessResponse) Reset() { *m = AddProcessResponse{} } +func (m *AddProcessResponse) String() string { return proto.CompactTextString(m) } +func (*AddProcessResponse) ProtoMessage() {} +func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +type CreateCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } +func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointRequest) ProtoMessage() {} +func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint { + if m != nil { + return m.Checkpoint + } + return nil +} + +type CreateCheckpointResponse struct { +} + +func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} } +func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointResponse) ProtoMessage() {} +func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +type DeleteCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } +func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointRequest) ProtoMessage() {} +func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type DeleteCheckpointResponse struct { +} + +func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} } +func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointResponse) ProtoMessage() {} +func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type ListCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} } +func (m *ListCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointRequest) ProtoMessage() {} +func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +type Checkpoint struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Exit bool `protobuf:"varint,2,opt,name=exit" 
json:"exit,omitempty"` + Tcp bool `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"` + UnixSockets bool `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"` + Shell bool `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"` +} + +func (m *Checkpoint) Reset() { *m = Checkpoint{} } +func (m *Checkpoint) String() string { return proto.CompactTextString(m) } +func (*Checkpoint) ProtoMessage() {} +func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +type ListCheckpointResponse struct { + Checkpoints []*Checkpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` +} + +func (m *ListCheckpointResponse) Reset() { *m = ListCheckpointResponse{} } +func (m *ListCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointResponse) ProtoMessage() {} +func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type StateRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (m *StateRequest) String() string { return proto.CompactTextString(m) } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +type ContainerState struct { + Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` +} + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (m *ContainerState) String() string { return proto.CompactTextString(m) } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +type Process struct { + Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *Process) Reset() { *m = Process{} } +func (m *Process) String() string { return proto.CompactTextString(m) } +func (*Process) ProtoMessage() {} +func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *Process) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *Process) GetRlimits() []*Rlimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type 
Container struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Processes []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"` + Status string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` + Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` + Pids []uint32 `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"` + Runtime string `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"` +} + +func (m *Container) Reset() { *m = Container{} } +func (m *Container) String() string { return proto.CompactTextString(m) } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *Container) GetProcesses() []*Process { + if m != nil { + return m.Processes + } + return nil +} + +// Machine is information about machine on which containerd is run +type Machine struct { + Cpus uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"` + Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"` +} + +func (m *Machine) Reset() { *m = Machine{} } +func (m *Machine) String() string { return proto.CompactTextString(m) } +func (*Machine) ProtoMessage() {} +func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +// StateResponse is information about containerd daemon +type StateResponse struct { + Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"` + Machine *Machine `protobuf:"bytes,2,opt,name=machine" json:"machine,omitempty"` +} + +func (m *StateResponse) Reset() { *m = StateResponse{} } +func (m *StateResponse) String() string { return proto.CompactTextString(m) } +func (*StateResponse) ProtoMessage() {} +func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *StateResponse) GetContainers() []*Container { + if m != nil { + return m.Containers + } + return nil +} + +func (m *StateResponse) GetMachine() *Machine { + if m != nil { + return m.Machine + } + return nil +} + +type UpdateContainerRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` + Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"` +} + +func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } +func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerRequest) ProtoMessage() {} +func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *UpdateContainerRequest) GetResources() *UpdateResource { + if m != nil { + return m.Resources + } + return nil +} + +type UpdateResource struct { + BlkioWeight uint32 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` + CpuShares uint32 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` + CpuPeriod uint32 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` + CpuQuota uint32 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` + CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` + CpusetMems string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` + MemoryLimit uint32 
`protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` + MemorySwap uint32 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` + MemoryReservation uint32 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` + KernelMemoryLimit uint32 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` +} + +func (m *UpdateResource) Reset() { *m = UpdateResource{} } +func (m *UpdateResource) String() string { return proto.CompactTextString(m) } +func (*UpdateResource) ProtoMessage() {} +func (*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +type UpdateContainerResponse struct { +} + +func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } +func (m *UpdateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerResponse) ProtoMessage() {} +func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +type EventsRequest struct { + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *EventsRequest) Reset() { *m = EventsRequest{} } +func (m *EventsRequest) String() string { return proto.CompactTextString(m) } +func (*EventsRequest) ProtoMessage() {} +func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"` + Pid string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"` + Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type NetworkStats struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes" json:"rx_bytes,omitempty"` + Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets" json:"rx_Packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors" json:"Rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped" json:"Rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=Tx_bytes" json:"Tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets" json:"Tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=Tx_errors" json:"Tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped" json:"Tx_dropped,omitempty"` +} + +func (m *NetworkStats) Reset() { *m = NetworkStats{} } +func (m *NetworkStats) String() string { return proto.CompactTextString(m) } +func (*NetworkStats) ProtoMessage() {} +func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type CpuUsage struct { + TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage" json:"total_usage,omitempty"` + PercpuUsage []uint64 `protobuf:"varint,2,rep,name=percpu_usage" json:"percpu_usage,omitempty"` + UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode" json:"usage_in_kernelmode,omitempty"` + UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode" json:"usage_in_usermode,omitempty"` +} + 
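+
+// cpuPercent is a hypothetical helper, not part of the protoc-gen-go
+// output above: it sketches how consumers typically combine two
+// successive CpuStats samples (see CpuStats below) into a utilization
+// percentage. The container's usage delta is divided by the host-wide
+// SystemUsage delta over the same window, scaled by the number of
+// cores reported in PercpuUsage.
+func cpuPercent(prev, cur *CpuStats) float64 {
+	cpuDelta := float64(cur.CpuUsage.TotalUsage) - float64(prev.CpuUsage.TotalUsage)
+	sysDelta := float64(cur.SystemUsage) - float64(prev.SystemUsage)
+	if cpuDelta <= 0 || sysDelta <= 0 {
+		return 0
+	}
+	return (cpuDelta / sysDelta) * float64(len(cur.CpuUsage.PercpuUsage)) * 100.0
+}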
+func (m *CpuUsage) Reset() { *m = CpuUsage{} } +func (m *CpuUsage) String() string { return proto.CompactTextString(m) } +func (*CpuUsage) ProtoMessage() {} +func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +type ThrottlingData struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time" json:"throttled_time,omitempty"` +} + +func (m *ThrottlingData) Reset() { *m = ThrottlingData{} } +func (m *ThrottlingData) String() string { return proto.CompactTextString(m) } +func (*ThrottlingData) ProtoMessage() {} +func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +type CpuStats struct { + CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage" json:"cpu_usage,omitempty"` + ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data" json:"throttling_data,omitempty"` + SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage" json:"system_usage,omitempty"` +} + +func (m *CpuStats) Reset() { *m = CpuStats{} } +func (m *CpuStats) String() string { return proto.CompactTextString(m) } +func (*CpuStats) ProtoMessage() {} +func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *CpuStats) GetCpuUsage() *CpuUsage { + if m != nil { + return m.CpuUsage + } + return nil +} + +func (m *CpuStats) GetThrottlingData() *ThrottlingData { + if m != nil { + return m.ThrottlingData + } + return nil +} + +type PidsStats struct { + Current uint64 `protobuf:"varint,1,opt,name=current" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` +} + +func (m *PidsStats) Reset() { *m = PidsStats{} } +func (m *PidsStats) String() string { return proto.CompactTextString(m) } +func (*PidsStats) ProtoMessage() {} +func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +type MemoryData struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *MemoryData) Reset() { *m = MemoryData{} } +func (m *MemoryData) String() string { return proto.CompactTextString(m) } +func (*MemoryData) ProtoMessage() {} +func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +type MemoryStats struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"` + Usage *MemoryData `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"` + SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage" json:"swap_usage,omitempty"` + KernelUsage *MemoryData `protobuf:"bytes,4,opt,name=kernel_usage" json:"kernel_usage,omitempty"` + Stats map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *MemoryStats) Reset() { *m = MemoryStats{} } +func (m *MemoryStats) String() string { return proto.CompactTextString(m) } +func (*MemoryStats) ProtoMessage() {} +func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *MemoryStats) GetUsage() *MemoryData { + if m != nil { + return 
m.Usage + } + return nil +} + +func (m *MemoryStats) GetSwapUsage() *MemoryData { + if m != nil { + return m.SwapUsage + } + return nil +} + +func (m *MemoryStats) GetKernelUsage() *MemoryData { + if m != nil { + return m.KernelUsage + } + return nil +} + +func (m *MemoryStats) GetStats() map[string]uint64 { + if m != nil { + return m.Stats + } + return nil +} + +type BlkioStatsEntry struct { + Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Op string `protobuf:"bytes,3,opt,name=op" json:"op,omitempty"` + Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"` +} + +func (m *BlkioStatsEntry) Reset() { *m = BlkioStatsEntry{} } +func (m *BlkioStatsEntry) String() string { return proto.CompactTextString(m) } +func (*BlkioStatsEntry) ProtoMessage() {} +func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type BlkioStats struct { + IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive" json:"sectors_recursive,omitempty"` +} + +func (m *BlkioStats) Reset() { *m = BlkioStats{} } +func (m *BlkioStats) String() string { return proto.CompactTextString(m) } +func (*BlkioStats) ProtoMessage() {} +func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceBytesRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServicedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoQueuedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServiceTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoWaitTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoMergedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { + if m != nil { + return m.SectorsRecursive + } + return nil +} + +type HugetlbStats struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" 
json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *HugetlbStats) Reset() { *m = HugetlbStats{} } +func (m *HugetlbStats) String() string { return proto.CompactTextString(m) } +func (*HugetlbStats) ProtoMessage() {} +func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type CgroupStats struct { + CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats" json:"cpu_stats,omitempty"` + MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats" json:"memory_stats,omitempty"` + BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats" json:"blkio_stats,omitempty"` + HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats" json:"pids_stats,omitempty"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (m *CgroupStats) String() string { return proto.CompactTextString(m) } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *CgroupStats) GetCpuStats() *CpuStats { + if m != nil { + return m.CpuStats + } + return nil +} + +func (m *CgroupStats) GetMemoryStats() *MemoryStats { + if m != nil { + return m.MemoryStats + } + return nil +} + +func (m *CgroupStats) GetBlkioStats() *BlkioStats { + if m != nil { + return m.BlkioStats + } + return nil +} + +func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats { + if m != nil { + return m.HugetlbStats + } + return nil +} + +func (m *CgroupStats) GetPidsStats() *PidsStats { + if m != nil { + return m.PidsStats + } + return nil +} + +type StatsResponse struct { + NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats" json:"network_stats,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats" json:"cgroup_stats,omitempty"` + Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } +func (m *StatsResponse) String() string { return proto.CompactTextString(m) } +func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *StatsResponse) GetNetworkStats() []*NetworkStats { + if m != nil { + return m.NetworkStats + } + return nil +} + +func (m *StatsResponse) GetCgroupStats() *CgroupStats { + if m != nil { + return m.CgroupStats + } + return nil +} + +type StatsRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func init() { + proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest") + proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse") + proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest") + proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse") + proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest") + 
proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse") + proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest") + proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse") + proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest") + proto.RegisterType((*Rlimit)(nil), "types.Rlimit") + proto.RegisterType((*User)(nil), "types.User") + proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse") + proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest") + proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse") + proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest") + proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse") + proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest") + proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint") + proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse") + proto.RegisterType((*StateRequest)(nil), "types.StateRequest") + proto.RegisterType((*ContainerState)(nil), "types.ContainerState") + proto.RegisterType((*Process)(nil), "types.Process") + proto.RegisterType((*Container)(nil), "types.Container") + proto.RegisterType((*Machine)(nil), "types.Machine") + proto.RegisterType((*StateResponse)(nil), "types.StateResponse") + proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest") + proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource") + proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse") + proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest") + proto.RegisterType((*Event)(nil), "types.Event") + proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats") + proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage") + proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData") + proto.RegisterType((*CpuStats)(nil), "types.CpuStats") + proto.RegisterType((*PidsStats)(nil), "types.PidsStats") + proto.RegisterType((*MemoryData)(nil), "types.MemoryData") + proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats") + proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry") + proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats") + proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats") + proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats") + proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse") + proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion2 + +// Client API for API service + +type APIClient interface { + GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) + CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) + UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) + Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) + UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) + AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) + CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) + DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) + ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) + State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) + Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) + Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) +} + +type aPIClient struct { + cc *grpc.ClientConn +} + +func NewAPIClient(cc *grpc.ClientConn) APIClient { + return &aPIClient{cc} +} + +func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) { + out := new(GetServerVersionResponse) + err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { + out := new(UpdateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) { + out := new(SignalResponse) + err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) { + out := new(UpdateProcessResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) { + out := new(AddProcessResponse) + err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) { + out := new(CreateCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) { + out := new(DeleteCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) { + out := new(ListCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) { + out := new(StateResponse) + err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...) + if err != nil { + return nil, err + } + x := &aPIEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type API_EventsClient interface { + Recv() (*Event, error) + grpc.ClientStream +} + +type aPIEventsClient struct { + grpc.ClientStream +} + +func (x *aPIEventsClient) Recv() (*Event, error) { + m := new(Event) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) { + out := new(StatsResponse) + err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for API service + +type APIServer interface { + GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error) + CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + Signal(context.Context, *SignalRequest) (*SignalResponse, error) + UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error) + AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error) + CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error) + DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error) + ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error) + State(context.Context, *StateRequest) (*StateResponse, error) + Events(*EventsRequest, API_EventsServer) error + Stats(context.Context, *StatsRequest) (*StatsResponse, error) +} + +func RegisterAPIServer(s *grpc.Server, srv APIServer) { + s.RegisterService(&_API_serviceDesc, srv) +} + +func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServerVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).GetServerVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/GetServerVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).UpdateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Signal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Signal", + } + 
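// When an interceptor is registered it wraps this handler closure,
+	// running before and after the concrete Signal implementation.
+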
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Signal(ctx, req.(*SignalRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).UpdateProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).AddProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/AddProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).DeleteCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/DeleteCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).ListCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/ListCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(StateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).State(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/State", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).State(ctx, req.(*StateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(APIServer).Events(m, &aPIEventsServer{stream}) +} + +type API_EventsServer interface { + Send(*Event) error + grpc.ServerStream +} + +type aPIEventsServer struct { + grpc.ServerStream +} + +func (x *aPIEventsServer) Send(m *Event) error { + return x.ServerStream.SendMsg(m) +} + +func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _API_serviceDesc = grpc.ServiceDesc{ + ServiceName: "types.API", + HandlerType: (*APIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetServerVersion", + Handler: _API_GetServerVersion_Handler, + }, + { + MethodName: "CreateContainer", + Handler: _API_CreateContainer_Handler, + }, + { + MethodName: "UpdateContainer", + Handler: _API_UpdateContainer_Handler, + }, + { + MethodName: "Signal", + Handler: _API_Signal_Handler, + }, + { + MethodName: "UpdateProcess", + Handler: _API_UpdateProcess_Handler, + }, + { + MethodName: "AddProcess", + Handler: _API_AddProcess_Handler, + }, + { + MethodName: "CreateCheckpoint", + Handler: _API_CreateCheckpoint_Handler, + }, + { + MethodName: "DeleteCheckpoint", + Handler: _API_DeleteCheckpoint_Handler, + }, + { + MethodName: "ListCheckpoint", + Handler: _API_ListCheckpoint_Handler, + }, + { + MethodName: "State", + Handler: _API_State_Handler, + }, + { + MethodName: "Stats", + Handler: _API_Stats_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Events", + Handler: _API_Events_Handler, + ServerStreams: true, + }, + }, +} + +var fileDescriptor0 = []byte{ + // 1918 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0x5b, 0x8f, 0x1b, 0x49, + 0x15, 0xce, 0xd8, 0x1e, 0xcf, 0xf8, 0xf8, 0x32, 0x99, 0xce, 0x5c, 0x1c, 0x2f, 0x9b, 0x0d, 0xcd, + 0xc2, 0x46, 0xb0, 0x1a, 0x2d, 0x5e, 0x2e, 0x4b, 0x90, 0x10, 0x61, 0xb2, 0xda, 0x05, 0x25, 0xc1, + 0x3b, 0x93, 0x2c, 0xe2, 0xc9, 0xea, 0xe9, 0xae, 0xd8, 0xc5, 0xb4, 0xbb, 0x9b, 0xaa, 0xea, 0xb9, + 0xfc, 0x08, 0x7e, 0x09, 0x12, 0xe2, 0x89, 0x1f, 0xc0, 0xff, 0xe0, 0x8d, 0x27, 0x24, 0xfe, 0x03, + 0xa7, 0x4e, 0x55, 0xb5, 0xbb, 0xdb, 0xf6, 0x04, 0x09, 0xf1, 0xb0, 0x2f, 0x96, 0xab, 0xea, 0x5c, + 0xbf, 0x73, 0xa9, 0x53, 0x0d, 0x9d, 0x20, 0xe3, 0x27, 0x99, 0x48, 0x55, 0xea, 0x6d, 0xab, 0xdb, + 0x8c, 0x49, 0xff, 0x21, 0x1c, 0x7f, 0xc1, 0xd4, 0x39, 0x13, 0x57, 0x4c, 0x7c, 0xcd, 0x84, 0xe4, + 0x69, 0x72, 0xc6, 0xfe, 0x98, 0x33, 0xa9, 0xfc, 0xdf, 0xc3, 0x70, 0xf5, 0x48, 0x66, 0x69, 0x22, + 0x99, 0xd7, 0x87, 
0xed, 0x45, 0xf0, 0x87, 0x54, 0x0c, 0xb7, 0x1e, 0x6f, 0x3d, 0xe9, 0xd3, 0x92, + 0x27, 0xb8, 0x6c, 0xb8, 0x65, 0x16, 0xa8, 0x70, 0x3e, 0x6c, 0xd2, 0xf2, 0x3e, 0xec, 0x0a, 0x76, + 0xc5, 0xb5, 0x80, 0x61, 0x0b, 0x77, 0x3a, 0xfe, 0x05, 0x1c, 0xbc, 0xc9, 0xa2, 0x40, 0xb1, 0x89, + 0x48, 0x43, 0x26, 0xa5, 0x55, 0xe9, 0x01, 0x34, 0x78, 0x44, 0x32, 0x3b, 0x5e, 0x17, 0x9a, 0x19, + 0x2e, 0x1a, 0xb4, 0xc0, 0x93, 0x30, 0x4e, 0x25, 0x3b, 0x57, 0x11, 0x4f, 0x48, 0xec, 0xae, 0xd6, + 0x72, 0xcd, 0x23, 0x35, 0x27, 0x99, 0x7d, 0x6f, 0x00, 0xed, 0x39, 0xe3, 0xb3, 0xb9, 0x1a, 0x6e, + 0xeb, 0xb5, 0x7f, 0x0c, 0x87, 0x35, 0x1d, 0xc6, 0x76, 0xff, 0x1f, 0x5b, 0x70, 0x74, 0x2a, 0x18, + 0x9e, 0x9c, 0xa6, 0x89, 0x0a, 0x78, 0xc2, 0xc4, 0x3a, 0xfd, 0xb8, 0xb8, 0xc8, 0x93, 0x28, 0x66, + 0x93, 0x00, 0x75, 0x2c, 0xcd, 0x98, 0xb3, 0xf0, 0x32, 0x4b, 0x79, 0xa2, 0xc8, 0x8c, 0x8e, 0x36, + 0x43, 0x92, 0x55, 0xe4, 0x9a, 0x36, 0x03, 0x97, 0x69, 0x6e, 0xcc, 0x70, 0x6b, 0x26, 0xc4, 0xb0, + 0xed, 0xd6, 0x71, 0x70, 0xc1, 0x62, 0x39, 0xdc, 0x79, 0xdc, 0xc4, 0xf5, 0x03, 0xe8, 0x26, 0xe9, + 0x84, 0x5f, 0xa5, 0xea, 0x2c, 0x4d, 0xd5, 0x70, 0x97, 0x5c, 0xdb, 0x83, 0x1d, 0x91, 0x27, 0x8a, + 0x2f, 0xd8, 0xb0, 0x43, 0x5c, 0x48, 0x65, 0x37, 0x9e, 0x89, 0x99, 0x1c, 0x02, 0xb1, 0x1e, 0x42, + 0x7f, 0x69, 0xcd, 0x73, 0x2e, 0x86, 0x5d, 0x02, 0xf7, 0x17, 0x70, 0xbc, 0xe2, 0x9e, 0x0d, 0xdb, + 0x77, 0xa0, 0x13, 0xba, 0x4d, 0x72, 0xb3, 0x3b, 0xbe, 0x7f, 0x42, 0x89, 0x70, 0x52, 0x10, 0xfb, + 0x9f, 0x41, 0xff, 0x9c, 0xcf, 0x92, 0x20, 0x7e, 0x67, 0x54, 0xb4, 0x6f, 0x44, 0x69, 0x02, 0xed, + 0xdf, 0x87, 0x81, 0xe3, 0xb4, 0x58, 0xff, 0xa5, 0x01, 0xfb, 0xcf, 0xa2, 0xe8, 0x8e, 0x30, 0x63, + 0x72, 0x28, 0x26, 0x30, 0x7b, 0x50, 0x4a, 0x83, 0x9c, 0x7f, 0x08, 0xad, 0x5c, 0xa2, 0x7d, 0x4d, + 0xb2, 0xaf, 0x6b, 0xed, 0x7b, 0x83, 0x5b, 0x5e, 0x0f, 0x5a, 0x81, 0xf6, 0xbf, 0x45, 0xfe, 0xa3, + 0x2d, 0x2c, 0xb9, 0x42, 0x9c, 0xed, 0x22, 0xbc, 0x8e, 0x2c, 0xc8, 0xd6, 0xca, 0x9d, 0x6a, 0x80, + 0x76, 0x6b, 0x01, 0xea, 0xd4, 0x02, 0x04, 0xb4, 0x3e, 0x80, 0x5e, 0x18, 0x64, 0xc1, 0x05, 0x8f, + 0xb9, 0xe2, 0x4c, 0x22, 0xa8, 0x5a, 0xfc, 0x31, 0xec, 0x05, 0x59, 0x16, 0x88, 0x45, 0x2a, 0xd0, + 0x99, 0xb7, 0x3c, 0x66, 0xc3, 0x9e, 0x23, 0x97, 0x2c, 0xe6, 0x49, 0x7e, 0xf3, 0x42, 0x87, 0x75, + 0xd8, 0xa7, 0x5d, 0x24, 0x4f, 0xd2, 0x57, 0xec, 0x7a, 0x22, 0xf8, 0x15, 0xd2, 0xce, 0x50, 0xce, + 0x80, 0x9c, 0x7b, 0x84, 0x91, 0x8d, 0xf9, 0x82, 0x2b, 0x39, 0xdc, 0x43, 0xc1, 0xdd, 0x71, 0xdf, + 0xfa, 0x77, 0x46, 0xbb, 0xfe, 0x18, 0xda, 0xe6, 0x9f, 0xf6, 0x55, 0x9f, 0x58, 0x98, 0x70, 0x25, + 0xd3, 0xb7, 0x8a, 0x20, 0x6a, 0xe9, 0xd5, 0x3c, 0x10, 0x11, 0x41, 0xd4, 0xc2, 0x80, 0xb5, 0x08, + 0x1d, 0xf4, 0x3a, 0xb7, 0xb8, 0xf6, 0xf5, 0x62, 0x66, 0x03, 0xd5, 0xf7, 0x8e, 0x60, 0x10, 0x44, + 0x11, 0xfa, 0x93, 0x22, 0xcc, 0x5f, 0xf0, 0x48, 0x22, 0x67, 0x13, 0x03, 0x76, 0x00, 0x5e, 0x39, + 0x3a, 0x36, 0x68, 0x61, 0x91, 0x40, 0x45, 0x76, 0xad, 0x8b, 0xdc, 0x77, 0x2b, 0xc5, 0xd0, 0xa0, + 0x68, 0xed, 0xbb, 0x6c, 0x2a, 0x0e, 0x56, 0xb3, 0x94, 0xca, 0xc6, 0x1f, 0xc1, 0x70, 0x55, 0x89, + 0x35, 0xe0, 0x37, 0x70, 0xfc, 0x9c, 0xc5, 0xec, 0x5d, 0x06, 0x20, 0x0a, 0x49, 0x80, 0x25, 0x62, + 0x92, 0x71, 0xb3, 0x9e, 0x55, 0x59, 0x56, 0xcf, 0x53, 0x38, 0x7c, 0xc1, 0xa5, 0xba, 0x5b, 0xcb, + 0x8a, 0x5c, 0x52, 0x87, 0xdd, 0x11, 0x4a, 0x4e, 0x3a, 0x53, 0x0a, 0xc3, 0xd8, 0x0d, 0x57, 0x36, + 0x9f, 0x31, 0x12, 0x2a, 0xcc, 0x6c, 0xd3, 0xc2, 0x42, 0xce, 0x13, 0x7e, 0x73, 0x9e, 0x86, 0x97, + 0x4c, 0x49, 0xea, 0x19, 0xd4, 0xc9, 0xe4, 0x9c, 0xc5, 0x31, 0xb5, 0x8c, 0x5d, 0xff, 0x97, 0x70, + 0x54, 0x37, 0xcb, 0xd6, 0xef, 0xf7, 0xa0, 
0xbb, 0xb4, 0x45, 0xa2, 0xb6, 0xe6, 0x5a, 0xcc, 0xd1, + 0xe9, 0xde, 0xb9, 0x42, 0x6c, 0xd7, 0xf8, 0xe3, 0x3f, 0x86, 0x41, 0x51, 0xeb, 0x44, 0x64, 0x2a, + 0x20, 0x50, 0xb9, 0xb4, 0x14, 0x7f, 0x6e, 0xc0, 0x8e, 0xcd, 0x09, 0x57, 0x49, 0xff, 0xc7, 0x5a, + 0xdd, 0x87, 0x8e, 0xbc, 0x95, 0x8a, 0x2d, 0x26, 0xb6, 0x62, 0xfb, 0xdf, 0xac, 0x8a, 0xfd, 0xd3, + 0x16, 0x74, 0x0a, 0x40, 0xdf, 0x79, 0x83, 0x7c, 0x1b, 0x3a, 0x99, 0x81, 0x96, 0x99, 0x22, 0xec, + 0x8e, 0x07, 0x56, 0x9e, 0x83, 0x7c, 0x19, 0x8e, 0x56, 0xed, 0xc6, 0x30, 0xe8, 0x21, 0xb0, 0x99, + 0x2e, 0xe1, 0xb6, 0x2e, 0xe1, 0xf2, 0x55, 0x41, 0xed, 0xce, 0xff, 0x08, 0x76, 0x5e, 0x06, 0xe1, + 0x1c, 0xad, 0xd1, 0x94, 0x61, 0x66, 0xc3, 0x4a, 0x17, 0xe4, 0x82, 0x21, 0x1a, 0xb7, 0xa6, 0x89, + 0xf8, 0x5f, 0x63, 0x9f, 0x37, 0x49, 0x62, 0xb3, 0xeb, 0x43, 0x2c, 0x68, 0xe7, 0x88, 0x4b, 0xae, + 0x95, 0xeb, 0xc1, 0xfb, 0x00, 0x76, 0x16, 0x46, 0xbe, 0xad, 0x79, 0x67, 0xbf, 0xd5, 0xea, 0x5f, + 0xc2, 0x91, 0xb9, 0x78, 0xef, 0xbc, 0x5e, 0x57, 0x2e, 0x12, 0xe3, 0xb2, 0xb9, 0x53, 0x9f, 0x40, + 0x47, 0x30, 0x99, 0xe6, 0x02, 0x01, 0x21, 0x14, 0xba, 0xe3, 0x43, 0x97, 0x5b, 0x24, 0xfa, 0xcc, + 0x9e, 0xfa, 0xff, 0xdc, 0x82, 0x41, 0x75, 0x4b, 0x97, 0xd8, 0x45, 0x7c, 0xc9, 0xd3, 0xdf, 0x99, + 0x69, 0xc0, 0x38, 0x8f, 0x59, 0x86, 0x50, 0x9c, 0x63, 0xd7, 0x44, 0x89, 0x8d, 0xd2, 0xd6, 0x84, + 0x09, 0x9e, 0x46, 0xcb, 0x49, 0x05, 0xb7, 0xbe, 0xca, 0x53, 0x15, 0xd8, 0xa9, 0x42, 0xdf, 0xf8, + 0x08, 0x21, 0x53, 0xa7, 0x1a, 0xc8, 0xed, 0x62, 0x0a, 0xa0, 0xbd, 0x97, 0x6c, 0x21, 0x6d, 0x16, + 0xa3, 0x52, 0x03, 0xee, 0x0b, 0x9d, 0x14, 0x36, 0x8f, 0x91, 0xd0, 0x6c, 0x9e, 0x5f, 0x07, 0x19, + 0x25, 0x73, 0x1f, 0x2b, 0x66, 0xdf, 0xec, 0xa1, 0xbd, 0x38, 0x5a, 0x05, 0xba, 0x27, 0x53, 0x5e, + 0xd3, 0xd1, 0x25, 0x13, 0x09, 0x8b, 0x5f, 0x96, 0x24, 0x01, 0xdd, 0xac, 0x38, 0xa6, 0xad, 0x60, + 0x6a, 0x9b, 0x98, 0x0f, 0xfd, 0xcf, 0xaf, 0x18, 0xb6, 0x03, 0x87, 0x32, 0xfa, 0xa5, 0xd3, 0x01, + 0x01, 0x5d, 0x64, 0xe4, 0x7d, 0xcb, 0xff, 0x0a, 0xb6, 0x89, 0xa6, 0x76, 0xa9, 0x98, 0x78, 0xac, + 0x0b, 0x41, 0xdf, 0xc5, 0xa7, 0xe5, 0x6a, 0x74, 0x29, 0x72, 0x9b, 0x44, 0xfe, 0x6d, 0x0b, 0x7a, + 0xaf, 0x98, 0xba, 0x4e, 0xc5, 0xa5, 0xce, 0x22, 0x59, 0x6b, 0x81, 0x7a, 0xe6, 0xbb, 0x99, 0x5e, + 0xdc, 0x2a, 0x0b, 0x77, 0x4b, 0x83, 0x81, 0x3b, 0x93, 0xc0, 0x34, 0x3e, 0xba, 0xb9, 0xb4, 0xdc, + 0xb3, 0x9b, 0x29, 0x56, 0x72, 0x2a, 0x4c, 0x9c, 0x89, 0x0c, 0xb7, 0x22, 0x91, 0x66, 0x19, 0x8b, + 0x8c, 0x2e, 0x2d, 0xec, 0xb5, 0x13, 0xd6, 0x76, 0x54, 0xb8, 0x93, 0x59, 0x61, 0x3b, 0x4e, 0xd8, + 0xeb, 0x42, 0xd8, 0x6e, 0x89, 0xcc, 0x09, 0xeb, 0x90, 0xe1, 0x0b, 0xd8, 0xc5, 0x58, 0xbe, 0x91, + 0xc1, 0x8c, 0x52, 0x45, 0x61, 0xac, 0xe3, 0x69, 0xae, 0x97, 0x06, 0x2c, 0xdd, 0x1f, 0x32, 0x26, + 0x30, 0xc2, 0x76, 0xb7, 0x81, 0x85, 0xd0, 0xf2, 0xde, 0x83, 0x07, 0xb4, 0x9c, 0xf2, 0x64, 0x6a, + 0xa2, 0xb4, 0x48, 0x23, 0x66, 0xfd, 0xc0, 0xc8, 0x15, 0x87, 0xba, 0x1f, 0xd2, 0x11, 0xf9, 0xe3, + 0xbf, 0x86, 0xc1, 0xeb, 0x39, 0x4e, 0xdc, 0x0a, 0x3b, 0xce, 0xec, 0x79, 0xa0, 0x02, 0x5d, 0xb1, + 0x19, 0x25, 0x9d, 0xb4, 0x0a, 0x91, 0x5b, 0x19, 0x12, 0x16, 0x4d, 0xdd, 0x91, 0x01, 0x0d, 0x2f, + 0xee, 0xe5, 0x11, 0x15, 0xb9, 0xb9, 0xf2, 0x15, 0x39, 0x61, 0x80, 0xf7, 0x29, 0x8f, 0x4b, 0x2e, + 0x74, 0xc7, 0x7b, 0xae, 0x6a, 0x9d, 0xa3, 0x27, 0xb0, 0xa7, 0x0a, 0x2b, 0xa6, 0x98, 0x48, 0x81, + 0x2d, 0x5e, 0x57, 0x56, 0x35, 0x1b, 0x75, 0x8f, 0xa4, 0xa6, 0x6c, 0xc5, 0x1a, 0xad, 0x3f, 0x80, + 0x0e, 0x36, 0x69, 0x69, 0xd4, 0xa2, 0x1b, 0x61, 0x2e, 0x04, 0x66, 0x95, 0x75, 0x03, 0xbb, 0x36, + 0x75, 0x44, 0xdb, 0x5e, 0x5e, 0x01, 0x98, 0x3c, 0x26, 0x81, 0x78, 
0x58, 0xc6, 0x18, 0x63, 0xb5, + 0x08, 0x6e, 0x0a, 0x80, 0xf5, 0x16, 0xca, 0x7b, 0x1b, 0xf0, 0x38, 0xb4, 0x83, 0x75, 0x49, 0x9e, + 0x01, 0xf2, 0x5f, 0x5b, 0xd0, 0x35, 0x02, 0x8d, 0x7e, 0x3c, 0x0e, 0xb1, 0xe3, 0x38, 0x89, 0x8f, + 0x9d, 0x82, 0xea, 0x20, 0x52, 0x32, 0x01, 0xe7, 0x15, 0x89, 0x75, 0x58, 0xf2, 0x68, 0x2d, 0xd9, + 0x47, 0xd0, 0x33, 0xf1, 0xb5, 0x84, 0xad, 0x4d, 0x84, 0x1f, 0xeb, 0x5b, 0x0a, 0x2d, 0xa1, 0xb6, + 0xdc, 0x1d, 0xbf, 0x5f, 0xa1, 0x20, 0x1b, 0x4f, 0xe8, 0xf7, 0xf3, 0x44, 0x89, 0xdb, 0xd1, 0xc7, + 0x00, 0xcb, 0x95, 0xae, 0xae, 0x4b, 0x76, 0x6b, 0x6b, 0x05, 0x3d, 0xb9, 0x0a, 0xe2, 0xdc, 0x02, + 0xf1, 0xb4, 0xf1, 0xd9, 0x16, 0x4e, 0x40, 0x7b, 0xbf, 0xd2, 0x3d, 0xac, 0xc4, 0x52, 0x79, 0x72, + 0xb5, 0xaa, 0x4f, 0xae, 0x96, 0x2e, 0xe5, 0x34, 0x5b, 0xbe, 0x48, 0x8c, 0x3c, 0x03, 0xdc, 0xdf, + 0x9b, 0x00, 0x4b, 0x61, 0xde, 0x53, 0x18, 0xf1, 0x74, 0xaa, 0x7b, 0x0f, 0x0f, 0x99, 0x29, 0xaa, + 0xa9, 0x60, 0x18, 0x4a, 0xc9, 0xaf, 0x98, 0xed, 0xfa, 0x47, 0xd6, 0x97, 0xba, 0x0d, 0x3f, 0x86, + 0xc3, 0x25, 0x6f, 0x54, 0x62, 0x6b, 0xdc, 0xc9, 0xf6, 0x29, 0x3c, 0x40, 0x36, 0xec, 0x4e, 0x79, + 0x85, 0xa9, 0x79, 0x27, 0xd3, 0xcf, 0xe0, 0x61, 0xc9, 0x4e, 0x9d, 0xfb, 0x25, 0xd6, 0xd6, 0x9d, + 0xac, 0x3f, 0x81, 0x23, 0x64, 0xbd, 0x0e, 0xb8, 0xaa, 0xf3, 0x6d, 0xff, 0x17, 0x76, 0x2e, 0x98, + 0x98, 0x55, 0xec, 0x6c, 0xdf, 0xc9, 0xf4, 0x43, 0xd8, 0x47, 0xa6, 0x9a, 0x9e, 0x9d, 0x77, 0xb1, + 0x48, 0x16, 0x2a, 0xec, 0x53, 0x25, 0x96, 0xdd, 0xbb, 0x58, 0xfc, 0x09, 0xf4, 0xbe, 0xcc, 0x67, + 0x4c, 0xc5, 0x17, 0x45, 0xf6, 0xff, 0x8f, 0xf5, 0xf4, 0xd7, 0x06, 0x74, 0x4f, 0x67, 0x22, 0xcd, + 0xb3, 0x4a, 0x1b, 0x31, 0x29, 0xbd, 0xd2, 0x46, 0x0c, 0xcd, 0x13, 0xe8, 0x99, 0xcb, 0xcb, 0x92, + 0x99, 0x5a, 0xf3, 0x56, 0x33, 0x5f, 0x4f, 0xaa, 0x74, 0x09, 0x5b, 0xc2, 0x6a, 0xb5, 0x95, 0xb2, + 0xf1, 0xe7, 0xd0, 0x9f, 0x1b, 0xbf, 0x2c, 0xa5, 0x89, 0xec, 0x87, 0x4e, 0xf3, 0xd2, 0xc0, 0x93, + 0xb2, 0xff, 0x06, 0x47, 0x1c, 0x58, 0xf4, 0x24, 0x34, 0x75, 0x65, 0x58, 0x7e, 0xcf, 0x16, 0x8d, + 0x6a, 0xf4, 0x25, 0xec, 0xaf, 0xb2, 0x56, 0x0a, 0xd0, 0x2f, 0x17, 0x60, 0x77, 0xfc, 0xc0, 0x8a, + 0x28, 0x73, 0x51, 0x55, 0xde, 0x98, 0x89, 0xa9, 0x78, 0x29, 0x79, 0xdf, 0x87, 0x7e, 0x62, 0xee, + 0xc0, 0x02, 0xb7, 0x66, 0x49, 0x40, 0xe5, 0x7e, 0x44, 0xec, 0x42, 0xf2, 0x66, 0x2d, 0x76, 0xe5, + 0x48, 0x54, 0x6e, 0x5b, 0xd3, 0x79, 0xed, 0x40, 0xbf, 0xee, 0x05, 0x3d, 0xfe, 0x77, 0x1b, 0x9a, + 0xcf, 0x26, 0xbf, 0xf6, 0xde, 0xc0, 0xfd, 0xfa, 0xf7, 0x1a, 0xef, 0x91, 0x15, 0xbf, 0xe1, 0x1b, + 0xcf, 0xe8, 0x83, 0x8d, 0xe7, 0x76, 0xba, 0xb8, 0xe7, 0x9d, 0xc1, 0x5e, 0xed, 0x73, 0x82, 0xe7, + 0x5a, 0xdd, 0xfa, 0xaf, 0x28, 0xa3, 0x47, 0x9b, 0x8e, 0xcb, 0x32, 0x6b, 0xe3, 0x4c, 0x21, 0x73, + 0xfd, 0xe8, 0x58, 0xc8, 0xdc, 0x34, 0x05, 0xdd, 0xf3, 0x7e, 0x0a, 0x6d, 0xf3, 0xf1, 0xc1, 0x3b, + 0xb0, 0xb4, 0x95, 0xaf, 0x18, 0xa3, 0xc3, 0xda, 0x6e, 0xc1, 0xf8, 0x02, 0xfa, 0x95, 0x0f, 0x45, + 0xde, 0x7b, 0x15, 0x5d, 0xd5, 0x6f, 0x17, 0xa3, 0x6f, 0xad, 0x3f, 0x2c, 0xa4, 0x9d, 0x02, 0x2c, + 0x9f, 0xd4, 0xde, 0xd0, 0x52, 0xaf, 0x7c, 0x03, 0x19, 0x3d, 0x5c, 0x73, 0x52, 0x08, 0xc1, 0x50, + 0xd6, 0x1f, 0xc7, 0x5e, 0x0d, 0xd5, 0xfa, 0x9b, 0xb5, 0x08, 0xe5, 0xc6, 0x57, 0x35, 0x89, 0xad, + 0xbf, 0x85, 0x0b, 0xb1, 0x1b, 0x1e, 0xdc, 0x85, 0xd8, 0x8d, 0x8f, 0xe8, 0x7b, 0xde, 0x6f, 0x61, + 0x50, 0x7d, 0xaf, 0x7a, 0x0e, 0xa4, 0xb5, 0xaf, 0xeb, 0xd1, 0xfb, 0x1b, 0x4e, 0x0b, 0x81, 0x3f, + 0x82, 0x6d, 0xf3, 0x32, 0x75, 0x85, 0x54, 0x7e, 0xcc, 0x8e, 0x0e, 0xaa, 0x9b, 0x05, 0xd7, 0x27, + 0xd0, 0x36, 0x83, 0x70, 0x91, 0x00, 0x95, 0xb9, 0x78, 0xd4, 0x2b, 0xef, 0xfa, 0xf7, 0x3e, 
0xd9,
+	0x72, 0x7a, 0x64, 0x45, 0x8f, 0x5c, 0xa7, 0xa7, 0x14, 0x9c, 0x8b, 0x36, 0x7d, 0x40, 0xfd, 0xf4,
+	0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xfa, 0x57, 0x5a, 0x4d, 0x15, 0x00, 0x00,
+}
diff --git a/vendor/github.com/docker/containerd/api/grpc/types/api.proto b/vendor/github.com/docker/containerd/api/grpc/types/api.proto
new file mode 100644
index 00000000..63a07bda
--- /dev/null
+++ b/vendor/github.com/docker/containerd/api/grpc/types/api.proto
@@ -0,0 +1,311 @@
+syntax = "proto3";
+
+package types;
+
+service API {
+	rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {}
+	rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {}
+	rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {}
+	rpc Signal(SignalRequest) returns (SignalResponse) {}
+	rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {}
+	rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {}
+	rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {}
+	rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {}
+	rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {}
+	rpc State(StateRequest) returns (StateResponse) {}
+	rpc Events(EventsRequest) returns (stream Event) {}
+	rpc Stats(StatsRequest) returns (StatsResponse) {}
+}
+
+message GetServerVersionRequest {
+}
+
+message GetServerVersionResponse {
+	uint32 major = 1;
+	uint32 minor = 2;
+	uint32 patch = 3;
+	string revision = 4;
+}
+
+message UpdateProcessRequest {
+	string id = 1;
+	string pid = 2;
+	bool closeStdin = 3; // Close stdin of the container
+	uint32 width = 4;
+	uint32 height = 5;
+}
+
+message UpdateProcessResponse {
+}
+
+message CreateContainerRequest {
+	string id = 1; // ID of container
+	string bundlePath = 2; // path to OCI bundle
+	string checkpoint = 3; // checkpoint name if an immediate checkpoint is desired (optional)
+	string stdin = 4; // path to the file where stdin will be read (optional)
+	string stdout = 5; // path to file where stdout will be written (optional)
+	string stderr = 6; // path to file where stderr will be written (optional)
+	repeated string labels = 7;
+	bool noPivotRoot = 8;
+	string runtime = 9;
+	repeated string runtimeArgs = 10;
+	string checkpointDir = 11; // Directory where checkpoints are stored
+}
+
+message CreateContainerResponse {
+	Container container = 1;
+}
+
+message SignalRequest {
+	string id = 1; // ID of container
+	string pid = 2; // PID of process inside container
+	uint32 signal = 3; // Signal to be sent; the values are listed in "man 7 signal"
+}
+
+message SignalResponse {
+}
+
+message AddProcessRequest {
+	string id = 1; // ID of container
+	bool terminal = 2; // Use tty for container stdio
+	User user = 3; // User under which process will be run
+	repeated string args = 4; // Arguments for process, first is binary path itself
+	repeated string env = 5; // List of environment variables for process
+	string cwd = 6; // Working directory of process
+	string pid = 7; // Process ID
+	string stdin = 8; // path to the file where stdin will be read (optional)
+	string stdout = 9; // path to file where stdout will be written (optional)
+	string stderr = 10; // path to file where stderr will be written (optional)
+	repeated string capabilities = 11;
+	string apparmorProfile = 12;
+	string selinuxLabel = 13;
+	bool noNewPrivileges = 14;
+	repeated Rlimit rlimits = 15;
+}
+
+message Rlimit {
+	string type = 1;
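+	// soft and hard carry the rlim_cur and rlim_max values used by setrlimit(2)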
+	uint64 soft = 2;
+	uint64 hard = 3;
+}
+
+message User {
+	uint32 uid = 1; // UID of user
+	uint32 gid = 2; // GID of user
+	repeated uint32 additionalGids = 3; // Additional groups to which user will be added
+}
+
+message AddProcessResponse {
+}
+
+message CreateCheckpointRequest {
+	string id = 1; // ID of container
+	Checkpoint checkpoint = 2; // Checkpoint configuration
+	string checkpointDir = 3; // Directory where checkpoints are stored
+}
+
+message CreateCheckpointResponse {
+}
+
+message DeleteCheckpointRequest {
+	string id = 1; // ID of container
+	string name = 2; // Name of checkpoint
+	string checkpointDir = 3; // Directory where checkpoints are stored
+}
+
+message DeleteCheckpointResponse {
+}
+
+message ListCheckpointRequest {
+	string id = 1; // ID of container
+	string checkpointDir = 2; // Directory where checkpoints are stored
+}
+
+message Checkpoint {
+	string name = 1; // Name of checkpoint
+	bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not
+	bool tcp = 3; // allow open tcp connections
+	bool unixSockets = 4; // allow external unix sockets
+	bool shell = 5; // allow shell-jobs
+}
+
+message ListCheckpointResponse {
+	repeated Checkpoint checkpoints = 1; // List of checkpoints
+}
+
+message StateRequest {
+	string id = 1; // container id for a single container
+}
+
+message ContainerState {
+	string status = 1;
+}
+
+message Process {
+	string pid = 1;
+	bool terminal = 2; // Use tty for container stdio
+	User user = 3; // User under which process will be run
+	repeated string args = 4; // Arguments for process, first is binary path itself
+	repeated string env = 5; // List of environment variables for process
+	string cwd = 6; // Working directory of process
+	uint32 systemPid = 7;
+	string stdin = 8; // path to the file where stdin will be read (optional)
+	string stdout = 9; // path to file where stdout will be written (optional)
+	string stderr = 10; // path to file where stderr will be written (optional)
+	repeated string capabilities = 11;
+	string apparmorProfile = 12;
+	string selinuxLabel = 13;
+	bool noNewPrivileges = 14;
+	repeated Rlimit rlimits = 15;
+}
+
+message Container {
+	string id = 1; // ID of container
+	string bundlePath = 2; // Path to OCI bundle
+	repeated Process processes = 3; // List of processes which run in container
+	string status = 4; // Container status ("running", "paused", etc.)
+	repeated string labels = 5;
+	repeated uint32 pids = 6;
+	string runtime = 7; // runtime used to execute the container
+}
+
+// Machine is information about machine on which containerd is run
+message Machine {
+	uint32 cpus = 1; // number of cpus
+	uint64 memory = 2; // amount of memory
+}
+
+// StateResponse is information about containerd daemon
+message StateResponse {
+	repeated Container containers = 1;
+	Machine machine = 2;
+}
+
+message UpdateContainerRequest {
+	string id = 1; // ID of container
+	string pid = 2;
+	string status = 3; // Status to which containerd will try to change
+	UpdateResource resources = 4;
+}
+
+message UpdateResource {
+	uint32 blkioWeight = 1;
+	uint32 cpuShares = 2;
+	uint32 cpuPeriod = 3;
+	uint32 cpuQuota = 4;
+	string cpusetCpus = 5;
+	string cpusetMems = 6;
+	uint32 memoryLimit = 7;
+	uint32 memorySwap = 8;
+	uint32 memoryReservation = 9;
+	uint32 kernelMemoryLimit = 10;
+}
+
+message UpdateContainerResponse {
+}
+
+message EventsRequest {
+	uint64 timestamp = 1;
+}
+
+message Event {
+	string type = 1;
+	string id = 2;
+	uint32 status = 3;
+	string pid = 4;
+	uint64 timestamp = 5;
+}
+
+message NetworkStats {
+	string name = 1; // name of network interface
+	uint64 rx_bytes = 2;
+	uint64 rx_Packets = 3;
+	uint64 Rx_errors = 4;
+	uint64 Rx_dropped = 5;
+	uint64 Tx_bytes = 6;
+	uint64 Tx_packets = 7;
+	uint64 Tx_errors = 8;
+	uint64 Tx_dropped = 9;
+}
+
+message CpuUsage {
+	uint64 total_usage = 1;
+	repeated uint64 percpu_usage = 2;
+	uint64 usage_in_kernelmode = 3;
+	uint64 usage_in_usermode = 4;
+}
+
+message ThrottlingData {
+	uint64 periods = 1;
+	uint64 throttled_periods = 2;
+	uint64 throttled_time = 3;
+}
+
+message CpuStats {
+	CpuUsage cpu_usage = 1;
+	ThrottlingData throttling_data = 2;
+	uint64 system_usage = 3;
+}
+
+message PidsStats {
+	uint64 current = 1;
+	uint64 limit = 2;
+}
+
+message MemoryData {
+	uint64 usage = 1;
+	uint64 max_usage = 2;
+	uint64 failcnt = 3;
+	uint64 limit = 4;
+}
+
+message MemoryStats {
+	uint64 cache = 1;
+	MemoryData usage = 2;
+	MemoryData swap_usage = 3;
+	MemoryData kernel_usage = 4;
+	map<string, uint64> stats = 5;
+}
+
+message BlkioStatsEntry {
+	uint64 major = 1;
+	uint64 minor = 2;
+	string op = 3;
+	uint64 value = 4;
+}
+
+message BlkioStats {
+	repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device
+	repeated BlkioStatsEntry io_serviced_recursive = 2;
+	repeated BlkioStatsEntry io_queued_recursive = 3;
+	repeated BlkioStatsEntry io_service_time_recursive = 4;
+	repeated BlkioStatsEntry io_wait_time_recursive = 5;
+	repeated BlkioStatsEntry io_merged_recursive = 6;
+	repeated BlkioStatsEntry io_time_recursive = 7;
+	repeated BlkioStatsEntry sectors_recursive = 8;
+}
+
+message HugetlbStats {
+	uint64 usage = 1;
+	uint64 max_usage = 2;
+	uint64 failcnt = 3;
+	uint64 limit = 4;
+}
+
+message CgroupStats {
+	CpuStats cpu_stats = 1;
+	MemoryStats memory_stats = 2;
+	BlkioStats blkio_stats = 3;
+	map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage"
+	PidsStats pids_stats = 5;
+}
+
+message StatsResponse {
+	repeated NetworkStats network_stats = 1;
+	CgroupStats cgroup_stats = 2;
+	uint64 timestamp = 3;
+};
+
+message StatsRequest {
+	string id = 1;
+}
diff --git a/vendor/github.com/docker/containerd/archutils/epoll.go b/vendor/github.com/docker/containerd/archutils/epoll.go
new file mode 100644
index 00000000..c8ade640
--- /dev/null
+++ b/vendor/github.com/docker/containerd/archutils/epoll.go
@@ -0,0 +1,19 @@
+// +build linux,!arm64
+
+package archutils
+
+import (
+	"syscall"
+)
+
+func EpollCreate1(flag int) (int, error) {
+	return syscall.EpollCreate1(flag)
+}
+
+func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error {
+	return syscall.EpollCtl(epfd, op, fd, event)
+}
+
+func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {
+	return syscall.EpollWait(epfd, events, msec)
+}
diff --git a/vendor/github.com/docker/containerd/archutils/epoll_arm64.go b/vendor/github.com/docker/containerd/archutils/epoll_arm64.go
new file mode 100644
index 00000000..00abc683
--- /dev/null
+++ b/vendor/github.com/docker/containerd/archutils/epoll_arm64.go
@@ -0,0 +1,70 @@
+// +build linux,arm64
+
+package archutils
+
+// #include <sys/epoll.h>
+/*
+int EpollCreate1(int flag) {
+	return epoll_create1(flag);
+}
+
+int EpollCtl(int efd, int op, int sfd, int events, int fd) {
+	struct epoll_event event;
+	event.events = events;
+	event.data.fd = fd;
+
+	return epoll_ctl(efd, op, sfd, &event);
+}
+
+struct event_t {
+	uint32_t events;
+	int fd;
+};
+
+struct epoll_event events[128];
+int run_epoll_wait(int fd, struct event_t *event) {
+	int n, i;
+	n = epoll_wait(fd, events, 128, -1);
+	for (i = 0; i < n; i++) {
+		event[i].events = events[i].events;
+		event[i].fd = events[i].data.fd;
+	}
+	return n;
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+func EpollCreate1(flag int) (int, error) {
+	fd := int(C.EpollCreate1(C.int(flag)))
+	if fd < 0 {
+		return fd, fmt.Errorf("failed to create epoll, errno is %d", fd)
+	}
+	return fd, nil
+}
+
+func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error {
+	// forward the caller's requested operation instead of assuming EPOLL_CTL_ADD
+	errno := C.EpollCtl(C.int(epfd), C.int(op), C.int(fd), C.int(event.Events), C.int(event.Fd))
+	if errno < 0 {
+		return fmt.Errorf("Failed to ctl epoll")
+	}
+	return nil
+}
+
+func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {
+	var c_events [128]C.struct_event_t
+	n := int(C.run_epoll_wait(C.int(epfd), (*C.struct_event_t)(unsafe.Pointer(&c_events))))
+	if n < 0 {
+		return int(n), fmt.Errorf("Failed to wait epoll")
+	}
+	for i := 0; i < n; i++ {
+		events[i].Fd = int32(c_events[i].fd)
+		events[i].Events = uint32(c_events[i].events)
+	}
+	return int(n), nil
+}
diff --git a/vendor/github.com/docker/containerd/osutils/fds.go b/vendor/github.com/docker/containerd/osutils/fds.go
new file mode 100644
index 00000000..98fc9305
--- /dev/null
+++ b/vendor/github.com/docker/containerd/osutils/fds.go
@@ -0,0 +1,18 @@
+// +build !windows,!darwin
+
+package osutils
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+)
+
+// GetOpenFds returns the number of open fds for the process provided by pid
+func GetOpenFds(pid int) (int, error) {
+	dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd"))
+	if err != nil {
+		return -1, err
+	}
+	return len(dirs), nil
+}
diff --git a/vendor/github.com/docker/containerd/osutils/prctl.go b/vendor/github.com/docker/containerd/osutils/prctl.go
new file mode 100644
index 00000000..eeb3da1b
--- /dev/null
+++ b/vendor/github.com/docker/containerd/osutils/prctl.go
@@ -0,0 +1,43 @@
+// +build linux
+
+// http://man7.org/linux/man-pages/man2/prctl.2.html
+package osutils
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// If arg2 is nonzero, set the "child subreaper" attribute of the
+// calling process; if arg2 is zero, unset the attribute.
When a +// process is marked as a child subreaper, all of the children +// that it creates, and their descendants, will be marked as +// having a subreaper. In effect, a subreaper fulfills the role +// of init(1) for its descendant processes. Upon termination of +// a process that is orphaned (i.e., its immediate parent has +// already terminated) and marked as having a subreaper, the +// nearest still living ancestor subreaper will receive a SIGCHLD +// signal and be able to wait(2) on the process to discover its +// termination status. +const PR_SET_CHILD_SUBREAPER = 36 + +// Return the "child subreaper" setting of the caller, in the +// location pointed to by (int *) arg2. +const PR_GET_CHILD_SUBREAPER = 37 + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0); err != 0 { + return -1, err + } + return int(i), nil +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, PR_SET_CHILD_SUBREAPER, uintptr(i), 0); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/docker/containerd/osutils/prctl_solaris.go b/vendor/github.com/docker/containerd/osutils/prctl_solaris.go new file mode 100644 index 00000000..84b2a777 --- /dev/null +++ b/vendor/github.com/docker/containerd/osutils/prctl_solaris.go @@ -0,0 +1,18 @@ +// +build solaris + +package osutils + +import ( + "errors" +) + +//Solaris TODO +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + return 0, errors.New("osutils GetSubreaper not implemented on Solaris") +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + return errors.New("osutils SetSubreaper not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/osutils/reaper.go b/vendor/github.com/docker/containerd/osutils/reaper.go new file mode 100644 index 00000000..ab7d24d9 --- /dev/null +++ b/vendor/github.com/docker/containerd/osutils/reaper.go @@ -0,0 +1,47 @@ +// +build !windows + +package osutils + +import "syscall" + +// Exit is the wait4 information from an exited process +type Exit struct { + Pid int + Status int +} + +// Reap reaps all child processes for the calling process and returns their +// exit information +func Reap() (exits []Exit, err error) { + var ( + ws syscall.WaitStatus + rus syscall.Rusage + ) + for { + pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus) + if err != nil { + if err == syscall.ECHILD { + return exits, nil + } + return exits, err + } + if pid <= 0 { + return exits, nil + } + exits = append(exits, Exit{ + Pid: pid, + Status: exitStatus(ws), + }) + } +} + +const exitSignalOffset = 128 + +// exitStatus returns the correct exit status for a process based on if it +// was signaled or exited cleanly +func exitStatus(status syscall.WaitStatus) int { + if status.Signaled() { + return exitSignalOffset + int(status.Signal()) + } + return status.ExitStatus() +} diff --git a/vendor/github.com/docker/containerd/runtime/container.go b/vendor/github.com/docker/containerd/runtime/container.go new file mode 100644 index 00000000..54848b13 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container.go @@ -0,0 +1,593 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" 
+ "io/ioutil" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + ocs "github.com/opencontainers/runtime-spec/specs-go" +) + +type Container interface { + // ID returns the container ID + ID() string + // Path returns the path to the bundle + Path() string + // Start starts the init process of the container + Start(checkpointPath string, s Stdio) (Process, error) + // Exec starts another process in an existing container + Exec(string, specs.ProcessSpec, Stdio) (Process, error) + // Delete removes the container's state and any resources + Delete() error + // Processes returns all the containers processes that have been added + Processes() ([]Process, error) + // State returns the containers runtime state + State() State + // Resume resumes a paused container + Resume() error + // Pause pauses a running container + Pause() error + // RemoveProcess removes the specified process from the container + RemoveProcess(string) error + // Checkpoints returns all the checkpoints for a container + Checkpoints(checkpointDir string) ([]Checkpoint, error) + // Checkpoint creates a new checkpoint + Checkpoint(checkpoint Checkpoint, checkpointDir string) error + // DeleteCheckpoint deletes the checkpoint for the provided name + DeleteCheckpoint(name string, checkpointDir string) error + // Labels are user provided labels for the container + Labels() []string + // Pids returns all pids inside the container + Pids() ([]int, error) + // Stats returns realtime container stats and resource information + Stats() (*Stat, error) + // Name or path of the OCI compliant runtime used to execute the container + Runtime() string + // OOM signals the channel if the container received an OOM notification + OOM() (OOM, error) + // UpdateResource updates the containers resources to new values + UpdateResources(*Resource) error + + // Status return the current status of the container. 
+ Status() (State, error) +} + +type OOM interface { + io.Closer + FD() int + ContainerID() string + Flush() + Removed() bool +} + +type Stdio struct { + Stdin string + Stdout string + Stderr string +} + +func NewStdio(stdin, stdout, stderr string) Stdio { + for _, s := range []*string{ + &stdin, &stdout, &stderr, + } { + if *s == "" { + *s = "/dev/null" + } + } + return Stdio{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + } +} + +type ContainerOpts struct { + Root string + ID string + Bundle string + Runtime string + RuntimeArgs []string + Shim string + Labels []string + NoPivotRoot bool + Timeout time.Duration +} + +// New returns a new container +func New(opts ContainerOpts) (Container, error) { + c := &container{ + root: opts.Root, + id: opts.ID, + bundle: opts.Bundle, + labels: opts.Labels, + processes: make(map[string]Process), + runtime: opts.Runtime, + runtimeArgs: opts.RuntimeArgs, + shim: opts.Shim, + noPivotRoot: opts.NoPivotRoot, + timeout: opts.Timeout, + } + if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil { + return nil, err + } + f, err := os.Create(filepath.Join(c.root, c.id, StateFile)) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(state{ + Bundle: c.bundle, + Labels: c.labels, + Runtime: c.runtime, + RuntimeArgs: c.runtimeArgs, + Shim: c.shim, + NoPivotRoot: opts.NoPivotRoot, + }); err != nil { + return nil, err + } + return c, nil +} + +func Load(root, id string, timeout time.Duration) (Container, error) { + var s state + f, err := os.Open(filepath.Join(root, id, StateFile)) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + c := &container{ + root: root, + id: id, + bundle: s.Bundle, + labels: s.Labels, + runtime: s.Runtime, + runtimeArgs: s.RuntimeArgs, + shim: s.Shim, + noPivotRoot: s.NoPivotRoot, + processes: make(map[string]Process), + timeout: timeout, + } + dirs, err := ioutil.ReadDir(filepath.Join(root, id)) + if err != nil { + return nil, err + } + for _, d := range dirs { + if !d.IsDir() { + continue + } + pid := d.Name() + s, err := readProcessState(filepath.Join(root, id, pid)) + if err != nil { + return nil, err + } + p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s) + if err != nil { + logrus.WithField("id", id).WithField("pid", pid).Debug("containerd: error loading process %s", err) + continue + } + c.processes[pid] = p + } + return c, nil +} + +func readProcessState(dir string) (*ProcessState, error) { + f, err := os.Open(filepath.Join(dir, "process.json")) + if err != nil { + return nil, err + } + defer f.Close() + var s ProcessState + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + return &s, nil +} + +type container struct { + // path to store runtime state information + root string + id string + bundle string + runtime string + runtimeArgs []string + shim string + processes map[string]Process + labels []string + oomFds []int + noPivotRoot bool + timeout time.Duration +} + +func (c *container) ID() string { + return c.id +} + +func (c *container) Path() string { + return c.bundle +} + +func (c *container) Labels() []string { + return c.labels +} + +func (c *container) readSpec() (*specs.Spec, error) { + var spec specs.Spec + f, err := os.Open(filepath.Join(c.bundle, "config.json")) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&spec); err != nil { + return nil, err + } + return &spec, nil +} + +func 
(c *container) Delete() error { + err := os.RemoveAll(filepath.Join(c.root, c.id)) + + args := c.runtimeArgs + args = append(args, "delete", c.id) + if derr := exec.Command(c.runtime, args...).Run(); err == nil { + err = derr + } + return err +} + +func (c *container) Processes() ([]Process, error) { + out := []Process{} + for _, p := range c.processes { + out = append(out, p) + } + return out, nil +} + +func (c *container) RemoveProcess(pid string) error { + delete(c.processes, pid) + return os.RemoveAll(filepath.Join(c.root, c.id, pid)) +} + +func (c *container) State() State { + proc := c.processes["init"] + if proc == nil { + return Stopped + } + return proc.State() +} + +func (c *container) Runtime() string { + return c.runtime +} + +func (c *container) Pause() error { + args := c.runtimeArgs + args = append(args, "pause", c.id) + b, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Resume() error { + args := c.runtimeArgs + args = append(args, "resume", c.id) + b, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Checkpoints(checkpointDir string) ([]Checkpoint, error) { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + var out []Checkpoint + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + return out, nil +} + +func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return err + } + + path := filepath.Join(checkpointDir, cpt.Name) + if err := os.Mkdir(path, 0755); err != nil { + return err + } + f, err := os.Create(filepath.Join(path, "config.json")) + if err != nil { + return err + } + cpt.Created = time.Now() + err = json.NewEncoder(f).Encode(cpt) + f.Close() + if err != nil { + return err + } + args := []string{ + "checkpoint", + "--image-path", path, + } + add := func(flags ...string) { + args = append(args, flags...) + } + add(c.runtimeArgs...) 
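+	// Translate the Checkpoint options into the runtime's criu-backed flags,
+	// e.g. Exit=false becomes --leave-running and Tcp becomes --tcp-established.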
+ if !cpt.Exit { + add("--leave-running") + } + if cpt.Shell { + add("--shell-job") + } + if cpt.Tcp { + add("--tcp-established") + } + if cpt.UnixSockets { + add("--ext-unix-sk") + } + add(c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(out)) + } + return err +} + +func (c *container) DeleteCheckpoint(name string, checkpointDir string) error { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + return os.RemoveAll(filepath.Join(checkpointDir, name)) +} + +func (c *container) Start(checkpointPath string, s Stdio) (Process, error) { + processRoot := filepath.Join(c.root, c.id, InitProcessID) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + checkpoint: checkpointPath, + root: processRoot, + id: InitProcessID, + c: c, + stdio: s, + spec: spec, + processSpec: specs.ProcessSpec(spec.Process), + } + p, err := c.newProcess(config) + if err != nil { + return nil, err + } + if err := p.Start(); err != nil { + return nil, err + } + c.processes[InitProcessID] = p + return p, nil +} + +func (c *container) Exec(pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) { + processRoot := filepath.Join(c.root, c.id, pid) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + defer func() { + if err != nil { + c.RemoveProcess(pid) + } + }() + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + exec: true, + id: pid, + root: processRoot, + c: c, + processSpec: pspec, + spec: spec, + stdio: s, + } + p, err := c.newProcess(config) + if err != nil { + return nil, err + } + if err := p.Start(); err != nil { + return nil, err + } + c.processes[pid] = p + return p, nil +} + +func hostIDFromMap(id uint32, mp []ocs.IDMapping) int { + for _, m := range mp { + if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) { + return int(m.HostID + (id - m.ContainerID)) + } + } + return 0 +} + +func (c *container) Pids() ([]int, error) { + args := c.runtimeArgs + args = append(args, "ps", "--format=json", c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("%s: %q", err.Error(), out) + } + var pids []int + if err := json.Unmarshal(out, &pids); err != nil { + return nil, err + } + return pids, nil +} + +func (c *container) Stats() (*Stat, error) { + now := time.Now() + args := c.runtimeArgs + args = append(args, "events", "--stats", c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("%s: %q", err.Error(), out) + } + s := struct { + Data *Stat `json:"data"` + }{} + if err := json.Unmarshal(out, &s); err != nil { + return nil, err + } + s.Data.Timestamp = now + return s.Data, nil +} + +// Status implements the runtime Container interface. +func (c *container) Status() (State, error) { + args := c.runtimeArgs + args = append(args, "state", c.id) + + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return "", fmt.Errorf("%s: %q", err.Error(), out) + } + + // We only require the runtime json output to have a top level Status field. 
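+	// Decoding into a minimal anonymous struct keeps the parse tolerant of
+	// any extra fields the runtime may emit.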
+ var s struct { + Status State `json:"status"` + } + if err := json.Unmarshal(out, &s); err != nil { + return "", err + } + return s.Status, nil +} + +func (c *container) writeEventFD(root string, cfd, efd int) error { + f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd)) + return err +} + +func (c *container) newProcess(config *processConfig) (Process, error) { + if c.shim == "" { + return newDirectProcess(config) + } + return newProcess(config) +} + +type waitArgs struct { + pid int + err error +} + +// isAlive checks if the shim that launched the container is still alive +func isAlive(cmd *exec.Cmd) (bool, error) { + if err := syscall.Kill(cmd.Process.Pid, 0); err != nil { + if err == syscall.ESRCH { + return false, nil + } + return false, err + } + return true, nil +} + +type oom struct { + id string + root string + control *os.File + eventfd int +} + +func (o *oom) ContainerID() string { + return o.id +} + +func (o *oom) FD() int { + return o.eventfd +} + +func (o *oom) Flush() { + buf := make([]byte, 8) + syscall.Read(o.eventfd, buf) +} + +func (o *oom) Removed() bool { + _, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control")) + return os.IsNotExist(err) +} + +func (o *oom) Close() error { + err := syscall.Close(o.eventfd) + if cerr := o.control.Close(); err == nil { + err = cerr + } + return err +} + +type message struct { + Level string `json:"level"` + Msg string `json:"msg"` +} + +func readLogMessages(path string) ([]message, error) { + var out []message + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + dec := json.NewDecoder(f) + for { + var m message + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return nil, err + } + out = append(out, m) + } + return out, nil +} diff --git a/vendor/github.com/docker/containerd/runtime/container_linux.go b/vendor/github.com/docker/containerd/runtime/container_linux.go new file mode 100644 index 00000000..9e2e6d6b --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container_linux.go @@ -0,0 +1,134 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + "github.com/opencontainers/runc/libcontainer" + ocs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (c *container) getLibctContainer() (libcontainer.Container, error) { + runtimeRoot := "/run/runc" + + // Check that the root wasn't changed + for _, opt := range c.runtimeArgs { + if strings.HasPrefix(opt, "--root=") { + runtimeRoot = strings.TrimPrefix(opt, "--root=") + break + } + } + + f, err := libcontainer.New(runtimeRoot, libcontainer.Cgroupfs) + if err != nil { + return nil, err + } + return f.Load(c.id) +} + +func (c *container) OOM() (OOM, error) { + container, err := c.getLibctContainer() + if err != nil { + if lerr, ok := err.(libcontainer.Error); ok { + // with oom registration sometimes the container can run, exit, and be destroyed + // faster than we can get the state back so we can just ignore this + if lerr.Code() == libcontainer.ContainerNotExists { + return nil, ErrContainerExited + } + } + return nil, err + } + state, err := container.State() + if err != nil { + return nil, err + } + memoryPath := state.CgroupPaths["memory"] + return c.getMemeoryEventFD(memoryPath) +} + +func u64Ptr(i uint64) 
*uint64 { return &i } + +func (c *container) UpdateResources(r *Resource) error { + sr := ocs.Resources{ + Memory: &ocs.Memory{ + Limit: u64Ptr(uint64(r.Memory)), + Reservation: u64Ptr(uint64(r.MemoryReservation)), + Swap: u64Ptr(uint64(r.MemorySwap)), + Kernel: u64Ptr(uint64(r.KernelMemory)), + }, + CPU: &ocs.CPU{ + Shares: u64Ptr(uint64(r.CPUShares)), + Quota: u64Ptr(uint64(r.CPUQuota)), + Period: u64Ptr(uint64(r.CPUPeriod)), + Cpus: &r.CpusetCpus, + Mems: &r.CpusetMems, + }, + BlockIO: &ocs.BlockIO{ + Weight: &r.BlkioWeight, + }, + } + + srStr := bytes.NewBuffer(nil) + if err := json.NewEncoder(srStr).Encode(&sr); err != nil { + return err + } + + args := c.runtimeArgs + args = append(args, "update", "-r", "-", c.id) + cmd := exec.Command(c.runtime, args...) + cmd.Stdin = srStr + b, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf(string(b)) + } + return nil +} + +func getRootIDs(s *specs.Spec) (int, int, error) { + if s == nil { + return 0, 0, nil + } + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == ocs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func (c *container) getMemeoryEventFD(root string) (*oom, error) { + f, err := os.Open(filepath.Join(root, "memory.oom_control")) + if err != nil { + return nil, err + } + fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if serr != 0 { + f.Close() + return nil, serr + } + if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil { + syscall.Close(int(fd)) + f.Close() + return nil, err + } + return &oom{ + root: root, + id: c.id, + eventfd: int(fd), + control: f, + }, nil +} diff --git a/vendor/github.com/docker/containerd/runtime/container_solaris.go b/vendor/github.com/docker/containerd/runtime/container_solaris.go new file mode 100644 index 00000000..4d0d8b4a --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container_solaris.go @@ -0,0 +1,19 @@ +package runtime + +import ( + "errors" + + "github.com/docker/containerd/specs" +) + +func (c *container) OOM() (OOM, error) { + return nil, errors.New("runtime OOM() not implemented on Solaris") +} + +func (c *container) UpdateResources(r *Resource) error { + return errors.New("runtime UpdateResources() not implemented on Solaris") +} + +func getRootIDs(s *specs.Spec) (int, int, error) { + return 0, 0, errors.New("runtime getRootIDs() not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/runtime/direct_process.go b/vendor/github.com/docker/containerd/runtime/direct_process.go new file mode 100644 index 00000000..fe8ee894 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/direct_process.go @@ -0,0 +1,283 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sync" + "syscall" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper" + "github.com/docker/containerd/subreaper/exec" + "github.com/docker/docker/pkg/term" + "github.com/opencontainers/runc/libcontainer" +) + +type directProcess struct { + *process + sync.WaitGroup + + io stdio + console libcontainer.Console + consolePath string + exec bool + checkpoint string + specs *specs.Spec +} + +func newDirectProcess(config *processConfig) (*directProcess, error) { + lp, err := newProcess(config) + if err != nil { + return nil, err + } + + return &directProcess{ + 
+		specs:      config.spec,
+		process:    lp,
+		exec:       config.exec,
+		checkpoint: config.checkpoint,
+	}, nil
+}
+
+func (d *directProcess) CloseStdin() error {
+	if d.io.stdin != nil {
+		return d.io.stdin.Close()
+	}
+	return nil
+}
+
+func (d *directProcess) Resize(w, h int) error {
+	if d.console == nil {
+		return nil
+	}
+	ws := term.Winsize{
+		Width:  uint16(w),
+		Height: uint16(h),
+	}
+	return term.SetWinsize(d.console.Fd(), &ws)
+}
+
+func (d *directProcess) openIO() (*os.File, *os.File, *os.File, error) {
+	uid, gid, err := getRootIDs(d.specs)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	if d.spec.Terminal {
+		console, err := libcontainer.NewConsole(uid, gid)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		d.console = console
+		d.consolePath = console.Path()
+		stdin, err := os.OpenFile(d.stdio.Stdin, syscall.O_RDONLY, 0)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		go io.Copy(console, stdin)
+		stdout, err := os.OpenFile(d.stdio.Stdout, syscall.O_RDWR, 0)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		d.Add(1)
+		go func() {
+			io.Copy(stdout, console)
+			console.Close()
+			d.Done()
+		}()
+		d.io.stdin = stdin
+		d.io.stdout = stdout
+		d.io.stderr = stdout
+		return nil, nil, nil, nil
+	}
+
+	stdin, err := os.OpenFile(d.stdio.Stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	stdout, err := os.OpenFile(d.stdio.Stdout, syscall.O_RDWR, 0)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	stderr, err := os.OpenFile(d.stdio.Stderr, syscall.O_RDWR, 0)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	d.io.stdin = stdin
+	d.io.stdout = stdout
+	d.io.stderr = stderr
+	return stdin, stdout, stderr, nil
+}
+
+func (d *directProcess) loadCheckpoint(bundle string) (*Checkpoint, error) {
+	if d.checkpoint == "" {
+		return nil, nil
+	}
+
+	f, err := os.Open(filepath.Join(bundle, "checkpoints", d.checkpoint, "config.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	var cpt Checkpoint
+	if err := json.NewDecoder(f).Decode(&cpt); err != nil {
+		return nil, err
+	}
+	return &cpt, nil
+}
+
+func (d *directProcess) Start() error {
+	cwd, err := filepath.Abs(d.root)
+	if err != nil {
+		return err
+	}
+
+	stdin, stdout, stderr, err := d.openIO()
+	if err != nil {
+		return err
+	}
+
+	checkpoint, err := d.loadCheckpoint(d.container.bundle)
+	if err != nil {
+		return err
+	}
+
+	logPath := filepath.Join(cwd, "log.json")
+	args := append([]string{
+		"--log", logPath,
+		"--log-format", "json",
+	}, d.container.runtimeArgs...)
+	if d.exec {
+		args = append(args, "exec",
+			"--process", filepath.Join(cwd, "process.json"),
+			"--console", d.consolePath,
+		)
+	} else if checkpoint != nil {
+		args = append(args, "restore",
+			"--image-path", filepath.Join(d.container.bundle, "checkpoints", checkpoint.Name),
+		)
+		add := func(flags ...string) {
+			args = append(args, flags...)
+		}
+		if checkpoint.Shell {
+			add("--shell-job")
+		}
+		if checkpoint.Tcp {
+			add("--tcp-established")
+		}
+		if checkpoint.UnixSockets {
+			add("--ext-unix-sk")
+		}
+		if d.container.noPivotRoot {
+			add("--no-pivot")
+		}
+	} else {
+		args = append(args, "start",
+			"--bundle", d.container.bundle,
+			"--console", d.consolePath,
+		)
+		if d.container.noPivotRoot {
+			args = append(args, "--no-pivot")
+		}
+	}
+	args = append(args,
+		"-d",
+		"--pid-file", filepath.Join(cwd, "pid"),
+		d.container.id,
+	)
+	cmd := exec.Command(d.container.runtime, args...)
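+	// Run the runtime from the bundle directory, with the stdio files
+	// prepared by openIO attached to the child process.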
+ cmd.Dir = d.container.bundle + cmd.Stdin = stdin + cmd.Stdout = stdout + cmd.Stderr = stderr + // set the parent death signal to SIGKILL so that if containerd dies the container + // process also dies + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGKILL, + } + + exitSubscription := subreaper.Subscribe() + err = d.startCmd(cmd) + if err != nil { + subreaper.Unsubscribe(exitSubscription) + d.delete() + return err + } + + go d.watch(cmd, exitSubscription) + + return nil +} + +func (d *directProcess) watch(cmd *exec.Cmd, exitSubscription *subreaper.Subscription) { + defer subreaper.Unsubscribe(exitSubscription) + defer d.delete() + + f, err := os.OpenFile(path.Join(d.root, ExitFile), syscall.O_WRONLY, 0) + if err == nil { + defer f.Close() + } + + exitCode := 0 + if err = cmd.Wait(); err != nil { + if exitError, ok := err.(exec.ExitCodeError); ok { + exitCode = exitError.Code + } + } + + if exitCode == 0 { + pid, err := d.getPidFromFile() + if err != nil { + return + } + exitSubscription.SetPid(pid) + exitCode = exitSubscription.Wait() + } + + writeInt(path.Join(d.root, ExitStatusFile), exitCode) +} + +func (d *directProcess) delete() { + if d.console != nil { + d.console.Close() + } + d.io.Close() + d.Wait() + if !d.exec { + exec.Command(d.container.runtime, append(d.container.runtimeArgs, "delete", d.container.id)...).Run() + } +} + +func writeInt(path string, i int) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + _, err = fmt.Fprintf(f, "%d", i) + return err +} + +type stdio struct { + stdin *os.File + stdout *os.File + stderr *os.File +} + +func (s stdio) Close() error { + err := s.stdin.Close() + if oerr := s.stdout.Close(); err == nil { + err = oerr + } + if oerr := s.stderr.Close(); err == nil { + err = oerr + } + return err +} diff --git a/vendor/github.com/docker/containerd/runtime/process.go b/vendor/github.com/docker/containerd/runtime/process.go new file mode 100644 index 00000000..0e8738d7 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/process.go @@ -0,0 +1,340 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + "golang.org/x/sys/unix" +) + +type Process interface { + io.Closer + + // ID of the process. 
+	// This is either "init" when it is the container's init process or
+	// it is a user-provided id for the process similar to the container id
+	ID() string
+	CloseStdin() error
+	Resize(int, int) error
+	// ExitFD returns the fd that provides an event when the process exits
+	ExitFD() int
+	// ExitStatus returns the exit status of the process or an error if it
+	// has not exited
+	ExitStatus() (int, error)
+	// Spec returns the process spec that created the process
+	Spec() specs.ProcessSpec
+	// Signal sends the provided signal to the process
+	Signal(os.Signal) error
+	// Container returns the container that the process belongs to
+	Container() Container
+	// Stdio of the container
+	Stdio() Stdio
+	// SystemPid is the pid on the system
+	SystemPid() int
+	// State returns whether the process is running or not
+	State() State
+	// Start executes the process
+	Start() error
+}
+
+type processConfig struct {
+	id          string
+	root        string
+	processSpec specs.ProcessSpec
+	spec        *specs.Spec
+	c           *container
+	stdio       Stdio
+	exec        bool
+	checkpoint  string
+}
+
+func newProcess(config *processConfig) (*process, error) {
+	p := &process{
+		root:      config.root,
+		id:        config.id,
+		container: config.c,
+		spec:      config.processSpec,
+		stdio:     config.stdio,
+	}
+	uid, gid, err := getRootIDs(config.spec)
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.Create(filepath.Join(config.root, "process.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	ps := ProcessState{
+		ProcessSpec: config.processSpec,
+		Exec:        config.exec,
+		PlatformProcessState: PlatformProcessState{
+			Checkpoint: config.checkpoint,
+			RootUID:    uid,
+			RootGID:    gid,
+		},
+		Stdin:       config.stdio.Stdin,
+		Stdout:      config.stdio.Stdout,
+		Stderr:      config.stdio.Stderr,
+		RuntimeArgs: config.c.runtimeArgs,
+		NoPivotRoot: config.c.noPivotRoot,
+	}
+
+	if err := json.NewEncoder(f).Encode(ps); err != nil {
+		return nil, err
+	}
+	exit, err := getExitPipe(filepath.Join(config.root, ExitFile))
+	if err != nil {
+		return nil, err
+	}
+	control, err := getControlPipe(filepath.Join(config.root, ControlFile))
+	if err != nil {
+		return nil, err
+	}
+	p.exitPipe = exit
+	p.controlPipe = control
+	return p, nil
+}
+
+func (p *process) Start() error {
+	cmd := exec.Command(p.container.shim,
+		p.container.id, p.container.bundle, p.container.runtime,
+	)
+	cmd.Dir = p.root
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Setpgid: true,
+	}
+
+	return p.startCmd(cmd)
+}
+
+func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) {
+	p := &process{
+		root:      root,
+		id:        id,
+		container: c,
+		spec:      s.ProcessSpec,
+		stdio: Stdio{
+			Stdin:  s.Stdin,
+			Stdout: s.Stdout,
+			Stderr: s.Stderr,
+		},
+	}
+	if _, err := p.getPidFromFile(); err != nil {
+		return nil, err
+	}
+	if _, err := p.ExitStatus(); err != nil {
+		if err == ErrProcessNotExited {
+			exit, err := getExitPipe(filepath.Join(root, ExitFile))
+			if err != nil {
+				return nil, err
+			}
+			p.exitPipe = exit
+			return p, nil
+		}
+		return nil, err
+	}
+	return p, nil
+}
+
+type process struct {
+	root        string
+	id          string
+	pid         int
+	exitPipe    *os.File
+	controlPipe *os.File
+	container   *container
+	spec        specs.ProcessSpec
+	stdio       Stdio
+}
+
+func (p *process) ID() string {
+	return p.id
+}
+
+func (p *process) Container() Container {
+	return p.container
+}
+
+func (p *process) SystemPid() int {
+	return p.pid
+}
+
+// ExitFD returns the fd of the exit pipe
+func (p *process) ExitFD() int {
+	return int(p.exitPipe.Fd())
+}
+
+func (p *process) CloseStdin() error {
+	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0)
+	return err
+}
+
+func (p *process) Resize(w, h int) error {
+	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h)
+	return err
+}
+
+func (p *process) ExitStatus() (int, error) {
+	data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return -1, ErrProcessNotExited
+		}
+		return -1, err
+	}
+	if len(data) == 0 {
+		return -1, ErrProcessNotExited
+	}
+	return strconv.Atoi(string(data))
+}
+
+func (p *process) Spec() specs.ProcessSpec {
+	return p.spec
+}
+
+func (p *process) Stdio() Stdio {
+	return p.stdio
+}
+
+// Close closes any open files and/or resources on the process
+func (p *process) Close() error {
+	return p.exitPipe.Close()
+}
+
+func (p *process) State() State {
+	if p.pid == 0 {
+		return Stopped
+	}
+	err := syscall.Kill(p.pid, 0)
+	if err != nil && err == syscall.ESRCH {
+		return Stopped
+	}
+	return Running
+}
+
+func (p *process) getPidFromFile() (int, error) {
+	data, err := ioutil.ReadFile(filepath.Join(p.root, "pid"))
+	if err != nil {
+		return -1, err
+	}
+	i, err := strconv.Atoi(string(data))
+	if err != nil {
+		return -1, errInvalidPidInt
+	}
+	p.pid = i
+	return i, nil
+}
+
+func getExitPipe(path string) (*os.File, error) {
+	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	// add NONBLOCK in case the other side has already closed or else
+	// this function would never return
+	return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
+}
+
+func getControlPipe(path string) (*os.File, error) {
+	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0)
+}
+
+// Signal sends the provided signal to the process
+func (p *process) Signal(s os.Signal) error {
+	return syscall.Kill(p.pid, s.(syscall.Signal))
+}
+
+func (p *process) startCmd(cmd *exec.Cmd) error {
+	if err := cmd.Start(); err != nil {
+		if exErr, ok := err.(*exec.Error); ok {
+			if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist {
+				return fmt.Errorf("%s not installed on system", p.container.shim)
+			}
+		}
+		return err
+	}
+	if err := p.waitForStart(cmd); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (p *process) waitForStart(cmd *exec.Cmd) error {
+	wc := make(chan error, 1)
+	go func() {
+		for {
+			if _, err := p.getPidFromFile(); err != nil {
+				if os.IsNotExist(err) || err == errInvalidPidInt {
+					alive, err := isAlive(cmd)
+					if err != nil {
+						wc <- err
+						return
+					}
+					if !alive {
+						// runc could have failed to run the container so let's get the error
+						// out of the logs or the shim could have encountered an error
+						messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json"))
+						if err != nil && !os.IsNotExist(err) {
+							wc <- err
+							return
+						}
+						for _, m := range messages {
+							if m.Level == "error" {
+								wc <- fmt.Errorf("shim error: %v", m.Msg)
+								return
+							}
+						}
+						// no errors reported back from shim, check for runc/runtime errors
+						messages, err = readLogMessages(filepath.Join(p.root, "log.json"))
+						if err != nil {
+							if os.IsNotExist(err) {
+								err = ErrContainerNotStarted
+							}
+							wc <- err
+							return
+						}
+						for _, m := range messages {
+							if m.Level == "error" {
+								wc <- fmt.Errorf("oci runtime error: %v", m.Msg)
+								return
+							}
+						}
+						wc <- ErrContainerNotStarted
+						return
+					}
+					time.Sleep(15 * time.Millisecond)
+					continue
+				}
+				wc <- err
+				return
+			}
+			// the pid file was read successfully
+			wc <- nil
+			return
+		}
+	}()
+	select {
+	case err := <-wc:
+		if err != nil {
+			return err
+		}
+		return nil
+	case <-time.After(p.container.timeout):
+		cmd.Process.Kill()
+		cmd.Wait()
+		return ErrContainerStartTimeout
+	}
+}
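A note on the mechanism in process.go above: getExitPipe and getControlPipe build shim-to-daemon signalling out of plain named pipes. The daemon opens the read end with O_NONBLOCK (a blocking open of a FIFO's read end would not return until a writer appears), the shim holds the write end open for the container's lifetime, and the epoll-based monitor sees HUP when that write end closes. A minimal self-contained sketch of the same pattern, using a hypothetical path and no epoll wiring; this is an illustration, not the vendored code:

package main

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical path; containerd uses roughly <root>/<id>/exit (ExitFile).
	path := "/tmp/demo-exit-fifo"
	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
		panic(err)
	}
	// O_NONBLOCK mirrors getExitPipe: the open returns immediately even
	// though no writer (the shim) has opened the other end yet.
	f, err := os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// In containerd this fd is registered with the epoll-based Monitor,
	// which treats EPOLLHUP (writer closed) as "process exited".
	fmt.Println("exit pipe fd:", f.Fd())
}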
diff --git a/vendor/github.com/docker/containerd/runtime/runtime.go b/vendor/github.com/docker/containerd/runtime/runtime.go
new file mode 100644
index 00000000..ff2094d4
--- /dev/null
+++ b/vendor/github.com/docker/containerd/runtime/runtime.go
@@ -0,0 +1,99 @@
+package runtime
+
+import (
+	"errors"
+	"time"
+
+	"github.com/docker/containerd/specs"
+)
+
+var (
+	ErrNotChildProcess       = errors.New("containerd: not a child process for container")
+	ErrInvalidContainerType  = errors.New("containerd: invalid container type for runtime")
+	ErrCheckpointNotExists   = errors.New("containerd: checkpoint does not exist for container")
+	ErrCheckpointExists      = errors.New("containerd: checkpoint already exists")
+	ErrContainerExited       = errors.New("containerd: container has exited")
+	ErrTerminalsNotSupported = errors.New("containerd: terminals are not supported for runtime")
+	ErrProcessNotExited      = errors.New("containerd: process has not exited")
+	ErrProcessExited         = errors.New("containerd: process has exited")
+	ErrContainerNotStarted   = errors.New("containerd: container not started")
+	ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout")
+
+	errNoPidFile      = errors.New("containerd: no process pid file found")
+	errInvalidPidInt  = errors.New("containerd: process pid is invalid")
+	errNotImplemented = errors.New("containerd: not implemented")
+)
+
+const (
+	ExitFile       = "exit"
+	ExitStatusFile = "exitStatus"
+	StateFile      = "state.json"
+	ControlFile    = "control"
+	InitProcessID  = "init"
+)
+
+type Checkpoint struct {
+	// Created is the time that the checkpoint happened
+	Created time.Time `json:"created"`
+	// Name is the name of the checkpoint
+	Name string `json:"name"`
+	// Tcp checkpoints open tcp connections
+	Tcp bool `json:"tcp"`
+	// UnixSockets persists unix sockets in the checkpoint
+	UnixSockets bool `json:"unixSockets"`
+	// Shell persists tty sessions in the checkpoint
+	Shell bool `json:"shell"`
+	// Exit exits the container after the checkpoint is finished
+	Exit bool `json:"exit"`
+}
+
+// PlatformProcessState contains platform-specific fields in the ProcessState structure
+type PlatformProcessState struct {
+	Checkpoint string `json:"checkpoint"`
+	RootUID    int    `json:"rootUID"`
+	RootGID    int    `json:"rootGID"`
+}
+
+type State string
+
+type Resource struct {
+	CPUShares         int64
+	BlkioWeight       uint16
+	CPUPeriod         int64
+	CPUQuota          int64
+	CpusetCpus        string
+	CpusetMems        string
+	KernelMemory      int64
+	Memory            int64
+	MemoryReservation int64
+	MemorySwap        int64
+}
+
+const (
+	Paused  = State("paused")
+	Stopped = State("stopped")
+	Running = State("running")
+)
+
+type state struct {
+	Bundle      string   `json:"bundle"`
+	Labels      []string `json:"labels"`
+	Stdin       string   `json:"stdin"`
+	Stdout      string   `json:"stdout"`
+	Stderr      string   `json:"stderr"`
+	Runtime     string   `json:"runtime"`
+	RuntimeArgs []string `json:"runtimeArgs"`
+	Shim        string   `json:"shim"`
+	NoPivotRoot bool     `json:"noPivotRoot"`
+}
+
+type ProcessState struct {
+	specs.ProcessSpec
+	Exec        bool     `json:"exec"`
+	Stdin       string   `json:"containerdStdin"`
+	Stdout      string   `json:"containerdStdout"`
+	Stderr      string   `json:"containerdStderr"`
+	RuntimeArgs []string `json:"runtimeArgs"`
+	NoPivotRoot bool     `json:"noPivotRoot"`
+
+	PlatformProcessState
+}
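For orientation, the Checkpoint type above is exactly what loadCheckpoint in direct_process.go decodes from checkpoints/<name>/config.json inside a bundle. A hedged sketch of writing a compatible config; the checkpoint name and flag values are invented for illustration:

package main

import (
	"encoding/json"
	"os"
	"time"

	"github.com/docker/containerd/runtime"
)

func main() {
	cpt := runtime.Checkpoint{
		Created:     time.Now(),
		Name:        "pre-upgrade", // hypothetical checkpoint name
		Tcp:         true,          // restored with --tcp-established
		UnixSockets: true,          // restored with --ext-unix-sk
		Shell:       false,         // would add --shell-job on restore
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	// Same JSON shape that loadCheckpoint expects to decode back.
	if err := enc.Encode(&cpt); err != nil {
		panic(err)
	}
}

The restore branch of directProcess.Start shown earlier is where these booleans turn into runc flags.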
diff --git a/vendor/github.com/docker/containerd/runtime/stats.go b/vendor/github.com/docker/containerd/runtime/stats.go
new file mode 100644
index 00000000..de160bf6
--- /dev/null
+++ b/vendor/github.com/docker/containerd/runtime/stats.go
@@ -0,0 +1,77 @@
+package runtime
+
+import "time"
+
+type Stat struct {
+	// Timestamp is the time that the statistics were collected
+	Timestamp time.Time
+	Cpu       Cpu                `json:"cpu"`
+	Memory    Memory             `json:"memory"`
+	Pids      Pids               `json:"pids"`
+	Blkio     Blkio              `json:"blkio"`
+	Hugetlb   map[string]Hugetlb `json:"hugetlb"`
+}
+
+type Hugetlb struct {
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type BlkioEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+type Blkio struct {
+	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
+	IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
+	IoQueuedRecursive       []BlkioEntry `json:"ioQueueRecursive,omitempty"`
+	IoServiceTimeRecursive  []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
+	IoWaitTimeRecursive     []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
+	IoMergedRecursive       []BlkioEntry `json:"ioMergedRecursive,omitempty"`
+	IoTimeRecursive         []BlkioEntry `json:"ioTimeRecursive,omitempty"`
+	SectorsRecursive        []BlkioEntry `json:"sectorsRecursive,omitempty"`
+}
+
+type Pids struct {
+	Current uint64 `json:"current,omitempty"`
+	Limit   uint64 `json:"limit,omitempty"`
+}
+
+type Throttling struct {
+	Periods          uint64 `json:"periods,omitempty"`
+	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
+	ThrottledTime    uint64 `json:"throttledTime,omitempty"`
+}
+
+type CpuUsage struct {
+	// Units: nanoseconds.
+	Total  uint64   `json:"total,omitempty"`
+	Percpu []uint64 `json:"percpu,omitempty"`
+	Kernel uint64   `json:"kernel"`
+	User   uint64   `json:"user"`
+}
+
+type Cpu struct {
+	Usage      CpuUsage   `json:"usage,omitempty"`
+	Throttling Throttling `json:"throttling,omitempty"`
+}
+
+type MemoryEntry struct {
+	Limit   uint64 `json:"limit"`
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type Memory struct {
+	Cache     uint64            `json:"cache,omitempty"`
+	Usage     MemoryEntry       `json:"usage,omitempty"`
+	Swap      MemoryEntry       `json:"swap,omitempty"`
+	Kernel    MemoryEntry       `json:"kernel,omitempty"`
+	KernelTCP MemoryEntry       `json:"kernelTCP,omitempty"`
+	Raw       map[string]uint64 `json:"raw,omitempty"`
+}
diff --git a/vendor/github.com/docker/containerd/specs/spec_linux.go b/vendor/github.com/docker/containerd/specs/spec_linux.go
new file mode 100644
index 00000000..205f1c81
--- /dev/null
+++ b/vendor/github.com/docker/containerd/specs/spec_linux.go
@@ -0,0 +1,9 @@
+package specs
+
+import ocs "github.com/opencontainers/runtime-spec/specs-go"
+
+type (
+	ProcessSpec ocs.Process
+	Spec        ocs.Spec
+	Rlimit      ocs.Rlimit
+)
diff --git a/vendor/github.com/docker/containerd/specs/spec_solaris.go b/vendor/github.com/docker/containerd/specs/spec_solaris.go
new file mode 100644
index 00000000..625d27f1
--- /dev/null
+++ b/vendor/github.com/docker/containerd/specs/spec_solaris.go
@@ -0,0 +1,8 @@
+package specs
+
+import ocs "github.com/opencontainers/specs/specs-go"
+
+type (
+	ProcessSpec ocs.Process
+	Spec        ocs.Spec
+)
diff --git a/vendor/github.com/docker/containerd/subreaper/exec/copy.go b/vendor/github.com/docker/containerd/subreaper/exec/copy.go
new file mode 100644
index 00000000..371aa998
--- /dev/null
+++
b/vendor/github.com/docker/containerd/subreaper/exec/copy.go @@ -0,0 +1,54 @@ +package exec + +import ( + "bytes" + "errors" +) + +// Below is largely a copy from go's os/exec/exec.go + +// Run starts the specified command and waits for it to complete. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the command fails to run or doesn't complete successfully, the +// error is of type *ExitError. Other error types may be +// returned for I/O problems. +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return translateError(err) + } + return translateError(c.Wait()) +} + +// Output runs the command and returns its standard output. +// Any returned error will usually be of type *ExitError. +// If c.Stderr was nil, Output populates ExitError.Stderr. +func (c *Cmd) Output() ([]byte, error) { + if c.Stdout != nil { + return nil, errors.New("exec: Stdout already set") + } + var stdout bytes.Buffer + c.Stdout = &stdout + + err := c.Run() + return stdout.Bytes(), err +} + +// CombinedOutput runs the command and returns its combined standard +// output and standard error. +func (c *Cmd) CombinedOutput() ([]byte, error) { + if c.Stdout != nil { + return nil, errors.New("exec: Stdout already set") + } + if c.Stderr != nil { + return nil, errors.New("exec: Stderr already set") + } + var b bytes.Buffer + c.Stdout = &b + c.Stderr = &b + err := c.Run() + return b.Bytes(), err +} diff --git a/vendor/github.com/docker/containerd/subreaper/exec/wrapper.go b/vendor/github.com/docker/containerd/subreaper/exec/wrapper.go new file mode 100644 index 00000000..2a6f2d64 --- /dev/null +++ b/vendor/github.com/docker/containerd/subreaper/exec/wrapper.go @@ -0,0 +1,81 @@ +package exec + +import ( + "fmt" + osExec "os/exec" + "strconv" + + "github.com/docker/containerd/subreaper" +) + +var ErrNotFound = osExec.ErrNotFound + +type Cmd struct { + osExec.Cmd + err error + sub *subreaper.Subscription +} + +type Error struct { + Name string + Err error +} + +func (e *Error) Error() string { + return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error() +} + +type ExitCodeError struct { + Code int +} + +func (e ExitCodeError) Error() string { + return fmt.Sprintf("Non-zero exit code: %d", e.Code) +} + +func LookPath(file string) (string, error) { + v, err := osExec.LookPath(file) + return v, translateError(err) +} + +func Command(name string, args ...string) *Cmd { + return &Cmd{ + Cmd: *osExec.Command(name, args...), + } +} + +func (c *Cmd) Start() error { + c.sub = subreaper.Subscribe() + err := c.Cmd.Start() + if err != nil { + subreaper.Unsubscribe(c.sub) + c.sub = nil + c.err = translateError(err) + return c.err + } + + c.sub.SetPid(c.Cmd.Process.Pid) + return nil +} + +func (c *Cmd) Wait() error { + if c.sub == nil { + return c.err + } + exitCode := c.sub.Wait() + if exitCode == 0 { + return nil + } + return ExitCodeError{Code: exitCode} +} + +func translateError(err error) error { + switch v := err.(type) { + case *osExec.Error: + return &Error{ + Name: v.Name, + Err: v.Err, + } + } + return err +} diff --git a/vendor/github.com/docker/containerd/subreaper/reaper.go b/vendor/github.com/docker/containerd/subreaper/reaper.go new file mode 100644 index 00000000..f7932043 --- /dev/null +++ b/vendor/github.com/docker/containerd/subreaper/reaper.go @@ -0,0 +1,102 @@ +package subreaper + +import ( + "os" + "os/signal" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + 
"github.com/docker/containerd/osutils" +) + +var ( + subscriptions = map[int]*Subscription{} + subLock = sync.Mutex{} + counter = 0 + once = sync.Once{} +) + +type Subscription struct { + id int + exit osutils.Exit + c chan osutils.Exit + wg sync.WaitGroup +} + +func (s *Subscription) SetPid(pid int) { + go func() { + for exit := range s.c { + if exit.Pid == pid { + s.exit = exit + s.wg.Done() + Unsubscribe(s) + } + } + }() +} + +func (s *Subscription) Wait() int { + s.wg.Wait() + return s.exit.Status +} + +func Subscribe() *Subscription { + subLock.Lock() + defer subLock.Unlock() + + Start() + + counter++ + s := &Subscription{ + id: counter, + c: make(chan osutils.Exit, 1024), + } + s.wg.Add(1) + subscriptions[s.id] = s + return s +} + +func Unsubscribe(sub *Subscription) { + subLock.Lock() + defer subLock.Unlock() + + delete(subscriptions, sub.id) +} + +func Start() error { + var err error + once.Do(func() { + err = osutils.SetSubreaper(1) + if err != nil { + return + } + + s := make(chan os.Signal, 2048) + signal.Notify(s, syscall.SIGCHLD) + go childReaper(s) + }) + + return err +} + +func childReaper(s chan os.Signal) { + for range s { + exits, err := osutils.Reap() + if err == nil { + notify(exits) + } else { + logrus.WithField("error", err).Warn("containerd: reap child processes") + } + } +} + +func notify(exits []osutils.Exit) { + subLock.Lock() + for _, exit := range exits { + for _, sub := range subscriptions { + sub.c <- exit + } + } + subLock.Unlock() +} diff --git a/vendor/github.com/docker/containerd/supervisor/add_process.go b/vendor/github.com/docker/containerd/supervisor/add_process.go new file mode 100644 index 00000000..93678ddc --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/add_process.go @@ -0,0 +1,43 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" + "github.com/docker/containerd/specs" +) + +type AddProcessTask struct { + baseTask + ID string + PID string + Stdout string + Stderr string + Stdin string + ProcessSpec *specs.ProcessSpec + StartResponse chan StartResponse +} + +func (s *Supervisor) addProcess(t *AddProcessTask) error { + start := time.Now() + ci, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + process, err := ci.container.Exec(t.PID, *t.ProcessSpec, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) + if err != nil { + return err + } + if err := s.monitorProcess(process); err != nil { + return err + } + ExecProcessTimer.UpdateSince(start) + t.StartResponse <- StartResponse{} + s.notifySubscribers(Event{ + Timestamp: time.Now(), + Type: StateStartProcess, + PID: t.PID, + ID: t.ID, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/checkpoint.go b/vendor/github.com/docker/containerd/supervisor/checkpoint.go new file mode 100644 index 00000000..fb201f5c --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/checkpoint.go @@ -0,0 +1,35 @@ +// +build !windows + +package supervisor + +import "github.com/docker/containerd/runtime" + +type CreateCheckpointTask struct { + baseTask + ID string + CheckpointDir string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) createCheckpoint(t *CreateCheckpointTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + return i.container.Checkpoint(*t.Checkpoint, t.CheckpointDir) +} + +type DeleteCheckpointTask struct { + baseTask + ID string + CheckpointDir string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) deleteCheckpoint(t *DeleteCheckpointTask) 
error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + return i.container.DeleteCheckpoint(t.Checkpoint.Name, t.CheckpointDir) +} diff --git a/vendor/github.com/docker/containerd/supervisor/create.go b/vendor/github.com/docker/containerd/supervisor/create.go new file mode 100644 index 00000000..12b1cd75 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/create.go @@ -0,0 +1,67 @@ +package supervisor + +import ( + "path/filepath" + "time" + + "github.com/docker/containerd/runtime" +) + +type StartTask struct { + baseTask + ID string + BundlePath string + Stdout string + Stderr string + Stdin string + StartResponse chan StartResponse + Labels []string + NoPivotRoot bool + Checkpoint *runtime.Checkpoint + CheckpointDir string + Runtime string + RuntimeArgs []string +} + +func (s *Supervisor) start(t *StartTask) error { + start := time.Now() + rt := s.runtime + rtArgs := s.runtimeArgs + if t.Runtime != "" { + rt = t.Runtime + rtArgs = t.RuntimeArgs + } + container, err := runtime.New(runtime.ContainerOpts{ + Root: s.stateDir, + ID: t.ID, + Bundle: t.BundlePath, + Runtime: rt, + RuntimeArgs: rtArgs, + Shim: s.shim, + Labels: t.Labels, + NoPivotRoot: t.NoPivotRoot, + Timeout: s.timeout, + }) + if err != nil { + return err + } + s.containers[t.ID] = &containerInfo{ + container: container, + } + ContainersCounter.Inc(1) + task := &startTask{ + Err: t.ErrorCh(), + Container: container, + StartResponse: t.StartResponse, + Stdin: t.Stdin, + Stdout: t.Stdout, + Stderr: t.Stderr, + } + if t.Checkpoint != nil { + task.CheckpointPath = filepath.Join(t.CheckpointDir, t.Checkpoint.Name) + } + + s.startTasks <- task + ContainerCreateTimer.UpdateSince(start) + return errDeferredResponse +} diff --git a/vendor/github.com/docker/containerd/supervisor/create_solaris.go b/vendor/github.com/docker/containerd/supervisor/create_solaris.go new file mode 100644 index 00000000..444e55bc --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/create_solaris.go @@ -0,0 +1,8 @@ +package supervisor + +type platformStartTask struct { +} + +// Checkpoint not supported on Solaris +func (task *startTask) setTaskCheckpoint(t *StartTask) { +} diff --git a/vendor/github.com/docker/containerd/supervisor/delete.go b/vendor/github.com/docker/containerd/supervisor/delete.go new file mode 100644 index 00000000..fd46f38b --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/delete.go @@ -0,0 +1,42 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type DeleteTask struct { + baseTask + ID string + Status int + PID string + NoEvent bool +} + +func (s *Supervisor) delete(t *DeleteTask) error { + if i, ok := s.containers[t.ID]; ok { + start := time.Now() + if err := s.deleteContainer(i.container); err != nil { + logrus.WithField("error", err).Error("containerd: deleting container") + } + if !t.NoEvent { + s.notifySubscribers(Event{ + Type: StateExit, + Timestamp: time.Now(), + ID: t.ID, + Status: t.Status, + PID: t.PID, + }) + } + ContainersCounter.Dec(1) + ContainerDeleteTimer.UpdateSince(start) + } + return nil +} + +func (s *Supervisor) deleteContainer(container runtime.Container) error { + delete(s.containers, container.ID()) + return container.Delete() +} diff --git a/vendor/github.com/docker/containerd/supervisor/errors.go b/vendor/github.com/docker/containerd/supervisor/errors.go new file mode 100644 index 00000000..3a996272 --- /dev/null +++ 
b/vendor/github.com/docker/containerd/supervisor/errors.go
@@ -0,0 +1,24 @@
+package supervisor
+
+import "errors"
+
+var (
+	// External errors
+	ErrTaskChanNil            = errors.New("containerd: task channel is nil")
+	ErrBundleNotFound         = errors.New("containerd: bundle not found")
+	ErrContainerNotFound      = errors.New("containerd: container not found")
+	ErrContainerExists        = errors.New("containerd: container already exists")
+	ErrProcessNotFound        = errors.New("containerd: process not found for container")
+	ErrUnknownContainerStatus = errors.New("containerd: unknown container status ")
+	ErrUnknownTask            = errors.New("containerd: unknown task type")
+
+	// Internal errors
+	errShutdown          = errors.New("containerd: supervisor is shutdown")
+	errRootNotAbs        = errors.New("containerd: rootfs path is not an absolute path")
+	errNoContainerForPid = errors.New("containerd: pid not registered for any container")
+	// internal error where the handler will defer to another for the final response
+	//
+	// TODO: we could probably do a typed error with another error channel for this to make it
+	// less like magic
+	errDeferredResponse = errors.New("containerd: deferred response")
+)
diff --git a/vendor/github.com/docker/containerd/supervisor/exit.go b/vendor/github.com/docker/containerd/supervisor/exit.go
new file mode 100644
index 00000000..8f19eee6
--- /dev/null
+++ b/vendor/github.com/docker/containerd/supervisor/exit.go
@@ -0,0 +1,81 @@
+package supervisor
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/containerd/runtime"
+)
+
+type ExitTask struct {
+	baseTask
+	Process runtime.Process
+}
+
+func (s *Supervisor) exit(t *ExitTask) error {
+	start := time.Now()
+	proc := t.Process
+	status, err := proc.ExitStatus()
+	if err != nil {
+		logrus.WithFields(logrus.Fields{
+			"error":     err,
+			"pid":       proc.ID(),
+			"id":        proc.Container().ID(),
+			"systemPid": proc.SystemPid(),
+		}).Error("containerd: get exit status")
+	}
+	logrus.WithFields(logrus.Fields{
+		"pid":       proc.ID(),
+		"status":    status,
+		"id":        proc.Container().ID(),
+		"systemPid": proc.SystemPid(),
+	}).Debug("containerd: process exited")
+
+	// if the process is not the container's init process then
+	// fire a separate event for this process
+	if proc.ID() != runtime.InitProcessID {
+		ne := &ExecExitTask{
+			ID:      proc.Container().ID(),
+			PID:     proc.ID(),
+			Status:  status,
+			Process: proc,
+		}
+		s.SendTask(ne)
+		return nil
+	}
+	container := proc.Container()
+	ne := &DeleteTask{
+		ID:     container.ID(),
+		Status: status,
+		PID:    proc.ID(),
+	}
+	s.SendTask(ne)
+
+	ExitProcessTimer.UpdateSince(start)
+
+	return nil
+}
+
+type ExecExitTask struct {
+	baseTask
+	ID      string
+	PID     string
+	Status  int
+	Process runtime.Process
+}
+
+func (s *Supervisor) execExit(t *ExecExitTask) error {
+	container := t.Process.Container()
+	// exec process: we remove this process without notifying the main event loop
+	if err := container.RemoveProcess(t.PID); err != nil {
+		logrus.WithField("error", err).Error("containerd: find container for pid")
+	}
+	s.notifySubscribers(Event{
+		Timestamp: time.Now(),
+		ID:        t.ID,
+		Type:      StateExit,
+		PID:       t.PID,
+		Status:    t.Status,
+	})
+	return nil
+}
diff --git a/vendor/github.com/docker/containerd/supervisor/get_containers.go b/vendor/github.com/docker/containerd/supervisor/get_containers.go
new file mode 100644
index 00000000..23f49c78
--- /dev/null
+++ b/vendor/github.com/docker/containerd/supervisor/get_containers.go
@@ -0,0 +1,28 @@
+package supervisor
+
+import "github.com/docker/containerd/runtime"
+
+type
GetContainersTask struct { + baseTask + ID string + Containers []runtime.Container +} + +func (s *Supervisor) getContainers(t *GetContainersTask) error { + + if t.ID != "" { + ci, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + t.Containers = append(t.Containers, ci.container) + + return nil + } + + for _, ci := range s.containers { + t.Containers = append(t.Containers, ci.container) + } + + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/machine.go b/vendor/github.com/docker/containerd/supervisor/machine.go new file mode 100644 index 00000000..7a21624b --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/machine.go @@ -0,0 +1,25 @@ +// +build !solaris + +package supervisor + +import "github.com/cloudfoundry/gosigar" + +type Machine struct { + Cpus int + Memory int64 +} + +func CollectMachineInformation() (Machine, error) { + m := Machine{} + cpu := sigar.CpuList{} + if err := cpu.Get(); err != nil { + return m, err + } + m.Cpus = len(cpu.List) + mem := sigar.Mem{} + if err := mem.Get(); err != nil { + return m, err + } + m.Memory = int64(mem.Total / 1024 / 1024) + return m, nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/machine_solaris.go b/vendor/github.com/docker/containerd/supervisor/machine_solaris.go new file mode 100644 index 00000000..c044705d --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/machine_solaris.go @@ -0,0 +1,15 @@ +package supervisor + +import ( + "errors" +) + +type Machine struct { + Cpus int + Memory int64 +} + +func CollectMachineInformation() (Machine, error) { + m := Machine{} + return m, errors.New("supervisor CollectMachineInformation not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/supervisor/metrics.go b/vendor/github.com/docker/containerd/supervisor/metrics.go new file mode 100644 index 00000000..2ba772a4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/metrics.go @@ -0,0 +1,31 @@ +package supervisor + +import "github.com/rcrowley/go-metrics" + +var ( + ContainerCreateTimer = metrics.NewTimer() + ContainerDeleteTimer = metrics.NewTimer() + ContainerStartTimer = metrics.NewTimer() + ContainerStatsTimer = metrics.NewTimer() + ContainersCounter = metrics.NewCounter() + EventSubscriberCounter = metrics.NewCounter() + TasksCounter = metrics.NewCounter() + ExecProcessTimer = metrics.NewTimer() + ExitProcessTimer = metrics.NewTimer() + EpollFdCounter = metrics.NewCounter() +) + +func Metrics() map[string]interface{} { + return map[string]interface{}{ + "container-create-time": ContainerCreateTimer, + "container-delete-time": ContainerDeleteTimer, + "container-start-time": ContainerStartTimer, + "container-stats-time": ContainerStatsTimer, + "containers": ContainersCounter, + "event-subscribers": EventSubscriberCounter, + "tasks": TasksCounter, + "exec-process-time": ExecProcessTimer, + "exit-process-time": ExitProcessTimer, + "epoll-fds": EpollFdCounter, + } +} diff --git a/vendor/github.com/docker/containerd/supervisor/monitor_linux.go b/vendor/github.com/docker/containerd/supervisor/monitor_linux.go new file mode 100644 index 00000000..b1765853 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/monitor_linux.go @@ -0,0 +1,129 @@ +package supervisor + +import ( + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/archutils" + "github.com/docker/containerd/runtime" +) + +func NewMonitor() (*Monitor, error) { + m := &Monitor{ + receivers: make(map[int]interface{}), + 
exits: make(chan runtime.Process, 1024), + ooms: make(chan string, 1024), + } + fd, err := archutils.EpollCreate1(0) + if err != nil { + return nil, err + } + m.epollFd = fd + go m.start() + return m, nil +} + +type Monitor struct { + m sync.Mutex + receivers map[int]interface{} + exits chan runtime.Process + ooms chan string + epollFd int +} + +func (m *Monitor) Exits() chan runtime.Process { + return m.exits +} + +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +func (m *Monitor) Monitor(p runtime.Process) error { + m.m.Lock() + defer m.m.Unlock() + fd := p.ExitFD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = p + return nil +} + +func (m *Monitor) MonitorOOM(c runtime.Container) error { + m.m.Lock() + defer m.m.Unlock() + o, err := c.OOM() + if err != nil { + return err + } + fd := o.FD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP | syscall.EPOLLIN, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = o + return nil +} + +func (m *Monitor) Close() error { + return syscall.Close(m.epollFd) +} + +func (m *Monitor) start() { + var events [128]syscall.EpollEvent + for { + n, err := archutils.EpollWait(m.epollFd, events[:], -1) + if err != nil { + if err == syscall.EINTR { + continue + } + logrus.WithField("error", err).Fatal("containerd: epoll wait") + } + // process events + for i := 0; i < n; i++ { + fd := int(events[i].Fd) + m.m.Lock() + r := m.receivers[fd] + switch t := r.(type) { + case runtime.Process: + if events[i].Events == syscall.EPOLLHUP { + delete(m.receivers, fd) + if err = syscall.EpollCtl(m.epollFd, syscall.EPOLL_CTL_DEL, fd, &syscall.EpollEvent{ + Events: syscall.EPOLLHUP, + Fd: int32(fd), + }); err != nil { + logrus.WithField("error", err).Error("containerd: epoll remove fd") + } + if err := t.Close(); err != nil { + logrus.WithField("error", err).Error("containerd: close process IO") + } + EpollFdCounter.Dec(1) + m.exits <- t + } + case runtime.OOM: + // always flush the event fd + t.Flush() + if t.Removed() { + delete(m.receivers, fd) + // epoll will remove the fd from its set after it has been closed + t.Close() + EpollFdCounter.Dec(1) + } else { + m.ooms <- t.ContainerID() + } + } + m.m.Unlock() + } + } +} diff --git a/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go b/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go new file mode 100644 index 00000000..6ad56ac8 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go @@ -0,0 +1,38 @@ +package supervisor + +import ( + "errors" + + "github.com/docker/containerd/runtime" +) + +func NewMonitor() (*Monitor, error) { + return &Monitor{}, errors.New("Monitor NewMonitor() not implemented on Solaris") +} + +type Monitor struct { + ooms chan string +} + +func (m *Monitor) Exits() chan runtime.Process { + return nil +} + +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +func (m *Monitor) Monitor(p runtime.Process) error { + return errors.New("Monitor Monitor() not implemented on Solaris") +} + +func (m *Monitor) MonitorOOM(c runtime.Container) error { + return errors.New("Monitor MonitorOOM() not implemented on Solaris") +} + +func (m *Monitor) Close() error { + return errors.New("Monitor Close() not implemented on Solaris") 
+} + +func (m *Monitor) start() { +} diff --git a/vendor/github.com/docker/containerd/supervisor/oom.go b/vendor/github.com/docker/containerd/supervisor/oom.go new file mode 100644 index 00000000..e204696e --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/oom.go @@ -0,0 +1,22 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +type OOMTask struct { + baseTask + ID string +} + +func (s *Supervisor) oom(t *OOMTask) error { + logrus.WithField("id", t.ID).Debug("containerd: container oom") + s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.ID, + Type: StateOOM, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/signal.go b/vendor/github.com/docker/containerd/supervisor/signal.go new file mode 100644 index 00000000..0705fc56 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/signal.go @@ -0,0 +1,27 @@ +package supervisor + +import "os" + +type SignalTask struct { + baseTask + ID string + PID string + Signal os.Signal +} + +func (s *Supervisor) signal(t *SignalTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + for _, p := range processes { + if p.ID() == t.PID { + return p.Signal(t.Signal) + } + } + return ErrProcessNotFound +} diff --git a/vendor/github.com/docker/containerd/supervisor/sort.go b/vendor/github.com/docker/containerd/supervisor/sort.go new file mode 100644 index 00000000..2153b7e4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/sort.go @@ -0,0 +1,27 @@ +package supervisor + +import ( + "sort" + + "github.com/docker/containerd/runtime" +) + +func sortProcesses(p []runtime.Process) { + sort.Sort(&processSorter{p}) +} + +type processSorter struct { + processes []runtime.Process +} + +func (s *processSorter) Len() int { + return len(s.processes) +} + +func (s *processSorter) Swap(i, j int) { + s.processes[i], s.processes[j] = s.processes[j], s.processes[i] +} + +func (s *processSorter) Less(i, j int) bool { + return s.processes[j].ID() == "init" +} diff --git a/vendor/github.com/docker/containerd/supervisor/stats.go b/vendor/github.com/docker/containerd/supervisor/stats.go new file mode 100644 index 00000000..00bf5f82 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/stats.go @@ -0,0 +1,33 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" +) + +type StatsTask struct { + baseTask + ID string + Stat chan *runtime.Stat +} + +func (s *Supervisor) stats(t *StatsTask) error { + start := time.Now() + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + // TODO: use workers for this + go func() { + s, err := i.container.Stats() + if err != nil { + t.ErrorCh() <- err + return + } + t.ErrorCh() <- nil + t.Stat <- s + ContainerStatsTimer.UpdateSince(start) + }() + return errDeferredResponse +} diff --git a/vendor/github.com/docker/containerd/supervisor/supervisor.go b/vendor/github.com/docker/containerd/supervisor/supervisor.go new file mode 100644 index 00000000..8cc253d4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/supervisor.go @@ -0,0 +1,385 @@ +package supervisor + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +const ( + defaultBufferSize = 2048 // size of queue in eventloop +) + +// New returns an initialized Process supervisor. 
+func New(stateDir string, runtimeName, shimName string, runtimeArgs []string, timeout time.Duration, retainCount int) (*Supervisor, error) {
+	startTasks := make(chan *startTask, 10)
+	if err := os.MkdirAll(stateDir, 0755); err != nil {
+		return nil, err
+	}
+	machine, err := CollectMachineInformation()
+	if err != nil {
+		return nil, err
+	}
+	monitor, err := NewMonitor()
+	if err != nil {
+		return nil, err
+	}
+	s := &Supervisor{
+		stateDir:    stateDir,
+		containers:  make(map[string]*containerInfo),
+		startTasks:  startTasks,
+		machine:     machine,
+		subscribers: make(map[chan Event]struct{}),
+		tasks:       make(chan Task, defaultBufferSize),
+		monitor:     monitor,
+		runtime:     runtimeName,
+		runtimeArgs: runtimeArgs,
+		shim:        shimName,
+		timeout:     timeout,
+	}
+	if err := setupEventLog(s, retainCount); err != nil {
+		return nil, err
+	}
+	go s.exitHandler()
+	go s.oomHandler()
+	if err := s.restore(); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+type containerInfo struct {
+	container runtime.Container
+}
+
+func setupEventLog(s *Supervisor, retainCount int) error {
+	if err := readEventLog(s); err != nil {
+		return err
+	}
+	logrus.WithField("count", len(s.eventLog)).Debug("containerd: read past events")
+	events := s.Events(time.Time{})
+	return eventLogger(s, filepath.Join(s.stateDir, "events.log"), events, retainCount)
+}
+
+func eventLogger(s *Supervisor, path string, events chan Event, retainCount int) error {
+	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755)
+	if err != nil {
+		return err
+	}
+	go func() {
+		var (
+			count = len(s.eventLog)
+			enc   = json.NewEncoder(f)
+		)
+		for e := range events {
+			// if we have a specified retain count, make sure we truncate the event
+			// log if it grows past the specified number of events to keep.
+			if retainCount > 0 {
+				if count > retainCount {
+					logrus.Debug("truncating event log")
+					// close the log file
+					if f != nil {
+						f.Close()
+					}
+					slice := retainCount - 1
+					l := len(s.eventLog)
+					if slice >= l {
+						slice = l
+					}
+					s.eventLock.Lock()
+					s.eventLog = s.eventLog[len(s.eventLog)-slice:]
+					s.eventLock.Unlock()
+					if f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755); err != nil {
+						logrus.WithField("error", err).Error("containerd: open event to journal")
+						continue
+					}
+					enc = json.NewEncoder(f)
+					count = 0
+					for _, le := range s.eventLog {
+						if err := enc.Encode(le); err != nil {
+							logrus.WithField("error", err).Error("containerd: write event to journal")
+						}
+					}
+				}
+			}
+			s.eventLock.Lock()
+			s.eventLog = append(s.eventLog, e)
+			s.eventLock.Unlock()
+			count++
+			if err := enc.Encode(e); err != nil {
+				logrus.WithField("error", err).Error("containerd: write event to journal")
+			}
+		}
+	}()
+	return nil
+}
+
+func readEventLog(s *Supervisor) error {
+	f, err := os.Open(filepath.Join(s.stateDir, "events.log"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	defer f.Close()
+	dec := json.NewDecoder(f)
+	for {
+		var e Event
+		if err := dec.Decode(&e); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		s.eventLog = append(s.eventLog, e)
+	}
+	return nil
+}
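The journal that eventLogger appends and readEventLog replays is newline-delimited JSON, one event per line. A small sketch of producing one such line; the local type mirrors the Event struct defined just below, and the field values are invented:

package main

import (
	"encoding/json"
	"os"
	"time"
)

// event mirrors supervisor.Event so the sketch stays self-contained.
type event struct {
	ID        string    `json:"id"`
	Type      string    `json:"type"`
	Timestamp time.Time `json:"timestamp"`
	PID       string    `json:"pid,omitempty"`
	Status    int       `json:"status,omitempty"`
}

func main() {
	// eventLogger writes one JSON object per line to events.log;
	// readEventLog decodes them back in order at startup.
	if err := json.NewEncoder(os.Stdout).Encode(event{
		ID:        "abc123", // hypothetical container id
		Type:      "exit",   // the StateExit constant from types.go
		Timestamp: time.Now(),
		PID:       "init",
	}); err != nil {
		panic(err)
	}
}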
+type Supervisor struct {
+	// stateDir is the directory on the system to store container runtime state information.
+	stateDir string
+	// name of the OCI compatible runtime used to execute containers
+	runtime     string
+	runtimeArgs []string
+	shim        string
+	containers  map[string]*containerInfo
+	startTasks  chan *startTask
+	// we need a lock around the subscribers map only because additions and deletions from
+	// the map are via the API so we cannot really control the concurrency
+	subscriberLock sync.RWMutex
+	subscribers    map[chan Event]struct{}
+	machine        Machine
+	tasks          chan Task
+	monitor        *Monitor
+	eventLog       []Event
+	eventLock      sync.Mutex
+	timeout        time.Duration
+}
+
+// Stop closes all startTasks and sends a SIGTERM to each container's pid1 then waits for them to
+// terminate. After it has handled all the SIGCHLD events it will close the signals chan
+// and exit. Stop is a non-blocking call and will return after the containers have been signaled
+func (s *Supervisor) Stop() {
+	// Close the startTasks channel so that no new containers get started
+	close(s.startTasks)
+}
+
+// Close closes any open files in the supervisor but expects that Stop has been
+// called so that no more containers are started.
+func (s *Supervisor) Close() error {
+	return nil
+}
+
+type Event struct {
+	ID        string    `json:"id"`
+	Type      string    `json:"type"`
+	Timestamp time.Time `json:"timestamp"`
+	PID       string    `json:"pid,omitempty"`
+	Status    int       `json:"status,omitempty"`
+}
+
+// Events returns an event channel that external consumers can use to receive updates
+// on container events
+func (s *Supervisor) Events(from time.Time) chan Event {
+	s.subscriberLock.Lock()
+	defer s.subscriberLock.Unlock()
+	c := make(chan Event, defaultBufferSize)
+	EventSubscriberCounter.Inc(1)
+	s.subscribers[c] = struct{}{}
+	if !from.IsZero() {
+		// replay old events
+		s.eventLock.Lock()
+		past := s.eventLog[:]
+		s.eventLock.Unlock()
+		for _, e := range past {
+			if e.Timestamp.After(from) {
+				c <- e
+			}
+		}
+		// Notify the client that from now on it will receive live events
+		c <- Event{
+			Type:      StateLive,
+			Timestamp: time.Now(),
+		}
+	}
+	return c
+}
+
+// Unsubscribe removes the provided channel from receiving any more events
+func (s *Supervisor) Unsubscribe(sub chan Event) {
+	s.subscriberLock.Lock()
+	defer s.subscriberLock.Unlock()
+	delete(s.subscribers, sub)
+	close(sub)
+	EventSubscriberCounter.Dec(1)
+}
+
+// notifySubscribers will send the provided event to the external subscribers
+// of the events channel
+func (s *Supervisor) notifySubscribers(e Event) {
+	s.subscriberLock.RLock()
+	defer s.subscriberLock.RUnlock()
+	for sub := range s.subscribers {
+		// do a non-blocking send for the channel
+		select {
+		case sub <- e:
+		default:
+			logrus.WithField("event", e.Type).Warn("containerd: event not sent to subscriber")
+		}
+	}
+}
+
+// Start is a non-blocking call that runs the supervisor for monitoring container processes and
+// executing new containers.
+//
+// This event loop is the only thing that is allowed to modify state of containers and processes;
+// therefore it is safe to do operations in the handlers that modify state of the system or
+// state of the Supervisor
+func (s *Supervisor) Start() error {
+	logrus.WithFields(logrus.Fields{
+		"stateDir":    s.stateDir,
+		"runtime":     s.runtime,
+		"runtimeArgs": s.runtimeArgs,
+		"memory":      s.machine.Memory,
+		"cpus":        s.machine.Cpus,
+	}).Debug("containerd: supervisor running")
+	go func() {
+		for i := range s.tasks {
+			s.handleTask(i)
+		}
+	}()
+	return nil
+}
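With Start running the single task-handling goroutine, all interaction with the supervisor happens by queueing Task values. A hedged usage sketch of driving the loop from a caller; the state directory, binary names, timeout, and retain count are made up, and error handling is trimmed:

package main

import (
	"fmt"
	"time"

	"github.com/docker/containerd/supervisor"
)

func main() {
	// Hypothetical arguments: state dir, OCI runtime, shim binary,
	// runtime args, start timeout, and journal retain count.
	s, err := supervisor.New("/run/containerd", "runc", "containerd-shim", nil, 15*time.Second, 500)
	if err != nil {
		panic(err)
	}
	if err := s.Start(); err != nil { // spawns the event-loop goroutine
		panic(err)
	}
	// Every query or mutation is a Task; only the event loop touches state.
	t := &supervisor.GetContainersTask{}
	s.SendTask(t)
	if err := <-t.ErrorCh(); err != nil {
		panic(err)
	}
	fmt.Println("running containers:", len(t.Containers))
}

A StartTask goes the same way, except handleTask answers with errDeferredResponse and hands the work to a worker goroutine, which completes the caller's error channel once the container has actually started.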
+
+// Machine returns the machine information on which the supervisor is executing
+func (s *Supervisor) Machine() Machine {
+	return s.machine
+}
+
+// SendTask sends the provided task to the supervisor's main event loop
+func (s *Supervisor) SendTask(evt Task) {
+	TasksCounter.Inc(1)
+	s.tasks <- evt
+}
+
+func (s *Supervisor) exitHandler() {
+	for p := range s.monitor.Exits() {
+		e := &ExitTask{
+			Process: p,
+		}
+		s.SendTask(e)
+	}
+}
+
+func (s *Supervisor) oomHandler() {
+	for id := range s.monitor.OOMs() {
+		e := &OOMTask{
+			ID: id,
+		}
+		s.SendTask(e)
+	}
+}
+
+func (s *Supervisor) monitorProcess(p runtime.Process) error {
+	return s.monitor.Monitor(p)
+}
+
+func (s *Supervisor) restore() error {
+	dirs, err := ioutil.ReadDir(s.stateDir)
+	if err != nil {
+		return err
+	}
+	for _, d := range dirs {
+		if !d.IsDir() {
+			continue
+		}
+		id := d.Name()
+		container, err := runtime.Load(s.stateDir, id, s.timeout)
+		if err != nil {
+			return err
+		}
+		processes, err := container.Processes()
+		if err != nil {
+			return err
+		}
+
+		ContainersCounter.Inc(1)
+		s.containers[id] = &containerInfo{
+			container: container,
+		}
+		if err := s.monitor.MonitorOOM(container); err != nil && err != runtime.ErrContainerExited {
+			logrus.WithField("error", err).Error("containerd: notify OOM events")
+		}
+		logrus.WithField("id", id).Debug("containerd: container restored")
+		var exitedProcesses []runtime.Process
+		for _, p := range processes {
+			if p.State() == runtime.Running {
+				if err := s.monitorProcess(p); err != nil {
+					return err
+				}
+			} else {
+				exitedProcesses = append(exitedProcesses, p)
+			}
+		}
+		if len(exitedProcesses) > 0 {
+			// sort processes so that init is fired last because that is how the kernel sends the
+			// exit events
+			sortProcesses(exitedProcesses)
+			for _, p := range exitedProcesses {
+				e := &ExitTask{
+					Process: p,
+				}
+				s.SendTask(e)
+			}
+		}
+	}
+	return nil
+}
+
+func (s *Supervisor) handleTask(i Task) {
+	var err error
+	switch t := i.(type) {
+	case *AddProcessTask:
+		err = s.addProcess(t)
+	case *CreateCheckpointTask:
+		err = s.createCheckpoint(t)
+	case *DeleteCheckpointTask:
+		err = s.deleteCheckpoint(t)
+	case *StartTask:
+		err = s.start(t)
+	case *DeleteTask:
+		err = s.delete(t)
+	case *ExitTask:
+		err = s.exit(t)
+	case *ExecExitTask:
+		err = s.execExit(t)
+	case *GetContainersTask:
+		err = s.getContainers(t)
+	case *SignalTask:
+		err = s.signal(t)
+	case *StatsTask:
+		err = s.stats(t)
+	case *UpdateTask:
+		err = s.updateContainer(t)
+	case *UpdateProcessTask:
+		err = s.updateProcess(t)
+	case *OOMTask:
+		err = s.oom(t)
+	default:
+		err = ErrUnknownTask
+	}
+	if err != errDeferredResponse {
+		i.ErrorCh() <- err
+		close(i.ErrorCh())
+	}
+}
diff --git a/vendor/github.com/docker/containerd/supervisor/task.go b/vendor/github.com/docker/containerd/supervisor/task.go
new file mode 100644
index 00000000..4d6d1c50
--- /dev/null
+++ b/vendor/github.com/docker/containerd/supervisor/task.go
@@ -0,0 +1,33 @@
+package supervisor
+
+import (
+	"sync"
+
+	"github.com/docker/containerd/runtime"
+)
+
+// StartResponse is the response containing a started container
+type StartResponse struct {
+	Container runtime.Container
+}
+
+// Task executes an action returning an error chan with either nil or
+// the error from executing the task
+type Task interface {
+	// ErrorCh returns a channel used to report an error from an async task
+	ErrorCh() chan error
+}
+
+type baseTask struct {
+	errCh chan error
+	mu    sync.Mutex
+}
+
+func (t *baseTask) ErrorCh() chan error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.errCh == nil {
+		t.errCh = make(chan error, 1)
+	}
+	return t.errCh
+} diff --git a/vendor/github.com/docker/containerd/supervisor/types.go b/vendor/github.com/docker/containerd/supervisor/types.go new file mode 100644 index 00000000..2e36fce0 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/types.go @@ -0,0 +1,12 @@ +package supervisor + +// State constants used in Event types +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateStartProcess = "start-process" + StateOOM = "oom" + StateLive = "live" +) diff --git a/vendor/github.com/docker/containerd/supervisor/update.go b/vendor/github.com/docker/containerd/supervisor/update.go new file mode 100644 index 00000000..fe2f4776 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/update.go @@ -0,0 +1,92 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" +) + +type UpdateTask struct { + baseTask + ID string + State runtime.State + Resources *runtime.Resource +} + +func (s *Supervisor) updateContainer(t *UpdateTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + container := i.container + if t.State != "" { + switch t.State { + case runtime.Running: + if err := container.Resume(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StateResume, + Timestamp: time.Now(), + }) + case runtime.Paused: + if err := container.Pause(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StatePause, + Timestamp: time.Now(), + }) + default: + return ErrUnknownContainerStatus + } + return nil + } + if t.Resources != nil { + return container.UpdateResources(t.Resources) + } + return nil +} + +type UpdateProcessTask struct { + baseTask + ID string + PID string + CloseStdin bool + Width int + Height int +} + +func (s *Supervisor) updateProcess(t *UpdateProcessTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + var process runtime.Process + for _, p := range processes { + if p.ID() == t.PID { + process = p + break + } + } + if process == nil { + return ErrProcessNotFound + } + if t.CloseStdin { + if err := process.CloseStdin(); err != nil { + return err + } + } + if t.Width > 0 || t.Height > 0 { + if err := process.Resize(t.Width, t.Height); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/worker.go b/vendor/github.com/docker/containerd/supervisor/worker.go new file mode 100644 index 00000000..a7570b80 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/worker.go @@ -0,0 +1,74 @@ +package supervisor + +import ( + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type Worker interface { + Start() +} + +type startTask struct { + Container runtime.Container + CheckpointPath string + Stdin string + Stdout string + Stderr string + Err chan error + StartResponse chan StartResponse +} + +func NewWorker(s *Supervisor, wg *sync.WaitGroup) Worker { + return &worker{ + s: s, + wg: wg, + } +} + +type worker struct { + wg *sync.WaitGroup + s *Supervisor +} + +func (w *worker) Start() { + defer w.wg.Done() + for t := range w.s.startTasks { + started := time.Now() + process, err := t.Container.Start(t.CheckpointPath, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + "id": t.Container.ID(), + }).Error("containerd: start 
container") + t.Err <- err + evt := &DeleteTask{ + ID: t.Container.ID(), + NoEvent: true, + } + w.s.SendTask(evt) + continue + } + if err := w.s.monitor.MonitorOOM(t.Container); err != nil && err != runtime.ErrContainerExited { + if process.State() != runtime.Stopped { + logrus.WithField("error", err).Error("containerd: notify OOM events") + } + } + if err := w.s.monitorProcess(process); err != nil { + logrus.WithField("error", err).Error("containerd: add process to monitor") + } + ContainerStartTimer.UpdateSince(started) + t.Err <- nil + t.StartResponse <- StartResponse{ + Container: t.Container, + } + w.s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.Container.ID(), + Type: StateStart, + }) + } +} diff --git a/vendor/github.com/docker/containerd/version.go b/vendor/github.com/docker/containerd/version.go new file mode 100644 index 00000000..d516b41e --- /dev/null +++ b/vendor/github.com/docker/containerd/version.go @@ -0,0 +1,11 @@ +package containerd + +import "fmt" + +const VersionMajor = 0 +const VersionMinor = 2 +const VersionPatch = 0 + +var Version = fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) + +var GitCommit = "" diff --git a/vendor/github.com/docker/distribution/.drone.yml b/vendor/github.com/docker/distribution/.drone.yml deleted file mode 100644 index d943e19f..00000000 --- a/vendor/github.com/docker/distribution/.drone.yml +++ /dev/null @@ -1,38 +0,0 @@ -image: dmp42/go:stable - -script: - # To be spoofed back into the test image - - go get github.com/modocache/gover - - - go get -t ./... - - # Go fmt - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - # Go lint - - test -z "$(golint ./... | tee /dev/stderr)" - # Go vet - - go vet ./... - # Go test - - go test -v -race -cover ./... - # Helper to concatenate reports - - gover - # Send to coverall - - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} - - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck - -notify: - email: - recipients: - - distribution@docker.com - - slack: - team: docker - channel: "#dt" - username: mom - token: {{SLACK_TOKEN}} - on_success: true - on_failure: true diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap index e4e50ee7..f0452da6 100644 --- a/vendor/github.com/docker/distribution/.mailmap +++ b/vendor/github.com/docker/distribution/.mailmap @@ -13,3 +13,4 @@ Sharif Nassar Sharif Nassar Sven Dowideit Vincent Giersch Vincent Giersch davidli davidli +Omer Cohen Omer Cohen \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS index a44266b0..0857b62f 100644 --- a/vendor/github.com/docker/distribution/AUTHORS +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -8,12 +8,15 @@ Alex Elman amitshukla Amy Lindburg Andrew Meredith +Andrew T Nguyen Andrey Kostov Andy Goldstein Anton Tiurin Antonio Mercado +Antonio Murdaca Arnaud Porterie Arthur Baars +Asuka Suzuki Avi Miller Ayose Cazorla BadZen @@ -37,7 +40,9 @@ Diogo Mónica DJ Enriquez Donald Huang Doug Davis +Eric Yang farmerworking +Felix Yan Florentin Raud Frederick F. 
Kautz IV gabriell nascimento @@ -45,29 +50,36 @@ harche Henri Gomez Hu Keping Hua Wang +HuKeping Ian Babrou +igayoso Jack Griffin Jason Freidman Jeff Nickoloff Jessie Frazelle Jianqing Wang +John Starks Jon Poler Jonathan Boulle Jordan Liggitt Josh Hawn Julien Fernandez +Keerthan Mala Kelsey Hightower Kenneth Lim Kenny Leung Li Yi Liu Hua +liuchang0812 Louis Kottmann Luke Carpenter Mary Anthony Matt Bentley +Matt Duch Matt Moore Matt Robenolt Michael Prokop +Michal Minar Miquel Sabaté Morgan Bauer moxiegirl @@ -78,6 +90,7 @@ Nuutti Kotivuori Oilbeater Olivier Gambier Olivier Jacques +Omer Cohen Patrick Devine Philip Misiowiec Richard Scothern @@ -90,6 +103,7 @@ Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn Spencer Rinehart +Stefan Weil Stephen J Day Sungho Moon Sven Dowideit @@ -111,3 +125,4 @@ xg.song xiekeyang Yann ROBERT yuzou +姜继忠 diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md index 1a9ecb74..7cc7aedf 100644 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -76,7 +76,7 @@ Some simple rules to ensure quick merge: You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. Then you should submit your implementation, clearly linking to the issue (and possible proposal). @@ -90,7 +90,7 @@ It's mandatory to: Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. -Have a look at a great, succesful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443) +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) ## Coding Style diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index 5329cee7..abb3e3bb 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,12 +1,11 @@ -FROM golang:1.5.3 +FROM golang:1.6 RUN apt-get update && \ - apt-get install -y librados-dev apache2-utils && \ + apt-get install -y apache2-utils && \ rm -rf /var/lib/apt/lists/* ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH -ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs +ENV DOCKER_BUILDTAGS include_oss include_gcs WORKDIR $DISTRIBUTION_DIR COPY . 
$DISTRIBUTION_DIR @@ -16,4 +15,4 @@ RUN make PREFIX=/go clean binaries VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] -CMD ["/etc/docker/registry/config.yml"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index 4604a39a..a0602d0b 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -14,8 +14,8 @@ endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" .PHONY: clean all fmt vet lint build test binaries -.DEFAULT: default -all: AUTHORS clean fmt vet fmt lint build test binaries +.DEFAULT: all +all: fmt vet lint build test binaries AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ @@ -24,51 +24,83 @@ AUTHORS: .mailmap .git/HEAD version/version.go: ./version/version.sh > $@ -${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go') +# Required for go 1.5 to build +GO15VENDOREXPERIMENT := 1 + +# Package list +PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) + +# Resolving binary dependencies for specific targets +GOLINT := $(shell which golint || echo '') +GODEP := $(shell which godep || echo '') + +${PREFIX}/bin/registry: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry -${PREFIX}/bin/digest: version/version.go $(shell find . -type f -name '*.go') +${PREFIX}/bin/digest: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest -${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . -type f -name '*.go') +${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template ./bin/registry-api-descriptor-template $< > $@ -# Depends on binaries because vet will silently fail if it can't load compiled -# imports -vet: binaries +vet: @echo "+ $@" - @go vet ./... + @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) fmt: @echo "+ $@" - @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \ - echo "+ please format Go code with 'gofmt -s'" + @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ + (echo >&2 "+ please format Go code with 'gofmt -s'" && false) lint: @echo "+ $@" - @test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" + $(if $(GOLINT), , \ + $(error Please install golint: `go get -u github.com/golang/lint/golint`)) + @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" build: @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./... + @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) test: @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" ./... + @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) test-full: @echo "+ $@" - @go test ./... 
+ @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template @echo "+ $@" clean: @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template" + @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" + +dep-save: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) save $(PKGS) + +dep-restore: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) restore -v + +dep-validate: dep-restore + @echo "+ $@" + @rm -Rf .vendor.bak + @mv vendor .vendor.bak + @rm -Rf Godeps + @$(GODEP) save ./... + @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ + (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) + @rm -Rf .vendor.bak diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index e8262133..c21d4724 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -17,7 +17,7 @@ This repository contains the following components: |**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | | **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | @@ -128,4 +128,4 @@ avenues are available for support: ## License -This project is distributed under [Apache License, Version 2.0](LICENSE.md). +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index ce43ea2e..1765e9f7 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -97,6 +97,11 @@ type BlobDeleter interface { Delete(ctx context.Context, dgst digest.Digest) error } +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + // BlobDescriptorService manages metadata about a blob by digest. Most // implementations will not expose such an interface explicitly. Such mappings // should be maintained by interacting with the BlobIngester. Hence, this is @@ -184,9 +189,11 @@ type BlobCreateOption interface { // BlobWriteService.Resume. If supported by the store, a writer can be // recovered with the id. 
type BlobWriter interface { - io.WriteSeeker + io.WriteCloser io.ReaderFrom - io.Closer + + // Size returns the number of bytes written to this blob. + Size() int64 // ID returns the identifier for this writer. The ID can be used with the // Blob service to later resume the write. @@ -211,9 +218,6 @@ type BlobWriter interface { // result in a no-op. This allows use of Cancel in a defer statement, // increasing the assurance that it is correctly called. Cancel(ctx context.Context) error - - // Get a reader to the blob being written by this BlobWriter - Reader() (io.ReadCloser, error) } // BlobService combines the operations to access, read and write blobs. This diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml index e1995d4b..3d1ffd2f 100644 --- a/vendor/github.com/docker/distribution/circle.yml +++ b/vendor/github.com/docker/distribution/circle.yml @@ -3,15 +3,12 @@ machine: pre: # Install gvm - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install ceph to test rados driver & create pool - - sudo -i ~/distribution/contrib/ceph/ci-setup.sh - - ceph osd pool create docker-distribution 1 # Install codecov for coverage - pip install --user codecov post: # go - - gvm install go1.5.3 --prefer-binary --name=stable + - gvm install go1.6 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations @@ -19,11 +16,9 @@ machine: BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME # Trick circle brainflat "no absolute path" behavior BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" + DOCKER_BUILDTAGS: "include_oss include_gcs" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" - # Ceph config - RADOS_POOL: "docker-distribution" hosts: # Not used yet @@ -54,26 +49,30 @@ test: # - gvm use old && go version - gvm use stable && go version + # Ensure validation of dependencies + - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: + pwd: $BASE_STABLE + # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). - - gvm use stable && godep go install ./...: + - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): pwd: $BASE_STABLE # FMT - - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + - gvm use stable && make fmt: pwd: $BASE_STABLE # VET - - gvm use stable && go vet ./...: + - gvm use stable && make vet: pwd: $BASE_STABLE # LINT - - gvm use stable && test -z "$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + - gvm use stable && make lint: pwd: $BASE_STABLE override: # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... 
| grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': timeout: 600 pwd: $BASE_STABLE diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go index 6fe1f817..3b4ab888 100644 --- a/vendor/github.com/docker/distribution/context/doc.go +++ b/vendor/github.com/docker/distribution/context/doc.go @@ -1,6 +1,6 @@ // Package context provides several utilities for working with // golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevent request information but this package is not limited to +// logging relevant request information but this package is not limited to // that purpose. // // The easiest way to get started is to get the background context: diff --git a/vendor/github.com/docker/distribution/context/http_test.go b/vendor/github.com/docker/distribution/context/http_test.go deleted file mode 100644 index 3d4b3c8e..00000000 --- a/vendor/github.com/docker/distribution/context/http_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package context - -import ( - "net/http" - "net/http/httptest" - "net/http/httputil" - "net/url" - "reflect" - "testing" - "time" -) - -func TestWithRequest(t *testing.T) { - var req http.Request - - start := time.Now() - req.Method = "GET" - req.Host = "example.com" - req.RequestURI = "/test-test" - req.Header = make(http.Header) - req.Header.Set("Referer", "foo.com/referer") - req.Header.Set("User-Agent", "test/0.1") - - ctx := WithRequest(Background(), &req) - for _, testcase := range []struct { - key string - expected interface{} - }{ - { - key: "http.request", - expected: &req, - }, - { - key: "http.request.id", - }, - { - key: "http.request.method", - expected: req.Method, - }, - { - key: "http.request.host", - expected: req.Host, - }, - { - key: "http.request.uri", - expected: req.RequestURI, - }, - { - key: "http.request.referer", - expected: req.Referer(), - }, - { - key: "http.request.useragent", - expected: req.UserAgent(), - }, - { - key: "http.request.remoteaddr", - expected: req.RemoteAddr, - }, - { - key: "http.request.startedat", - }, - } { - v := ctx.Value(testcase.key) - - if v == nil { - t.Fatalf("value not found for %q", testcase.key) - } - - if testcase.expected != nil && v != testcase.expected { - t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) - } - - // Key specific checks! 
- switch testcase.key { - case "http.request.id": - if _, ok := v.(string); !ok { - t.Fatalf("request id not a string: %v", v) - } - case "http.request.startedat": - vt, ok := v.(time.Time) - if !ok { - t.Fatalf("value not a time: %v", v) - } - - now := time.Now() - if vt.After(now) { - t.Fatalf("time generated too late: %v > %v", vt, now) - } - - if vt.Before(start) { - t.Fatalf("time generated too early: %v < %v", vt, start) - } - } - } -} - -type testResponseWriter struct { - flushed bool - status int - written int64 - header http.Header -} - -func (trw *testResponseWriter) Header() http.Header { - if trw.header == nil { - trw.header = make(http.Header) - } - - return trw.header -} - -func (trw *testResponseWriter) Write(p []byte) (n int, err error) { - if trw.status == 0 { - trw.status = http.StatusOK - } - - n = len(p) - trw.written += int64(n) - return -} - -func (trw *testResponseWriter) WriteHeader(status int) { - trw.status = status -} - -func (trw *testResponseWriter) Flush() { - trw.flushed = true -} - -func TestWithResponseWriter(t *testing.T) { - trw := testResponseWriter{} - ctx, rw := WithResponseWriter(Background(), &trw) - - if ctx.Value("http.response") != rw { - t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) - } - - grw, err := GetResponseWriter(ctx) - if err != nil { - t.Fatalf("error getting response writer: %v", err) - } - - if grw != rw { - t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) - } - - if ctx.Value("http.response.status") != 0 { - t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status")) - } - - if n, err := rw.Write(make([]byte, 1024)); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } else if n != 1024 { - t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) - } - - if ctx.Value("http.response.status") != http.StatusOK { - t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) - } - - if ctx.Value("http.response.written") != int64(1024) { - t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) - } - - // Make sure flush propagates - rw.(http.Flusher).Flush() - - if !trw.flushed { - t.Fatalf("response writer not flushed") - } - - // Write another status and make sure context is correct. This normally - // wouldn't work except for in this contrived testcase. - rw.WriteHeader(http.StatusBadRequest) - - if ctx.Value("http.response.status") != http.StatusBadRequest { - t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) - } -} - -func TestWithVars(t *testing.T) { - var req http.Request - vars := map[string]string{ - "foo": "asdf", - "bar": "qwer", - } - - getVarsFromRequest = func(r *http.Request) map[string]string { - if r != &req { - t.Fatalf("unexpected request: %v != %v", r, req) - } - - return vars - } - - ctx := WithVars(Background(), &req) - for _, testcase := range []struct { - key string - expected interface{} - }{ - { - key: "vars", - expected: vars, - }, - { - key: "vars.foo", - expected: "asdf", - }, - { - key: "vars.bar", - expected: "qwer", - }, - } { - v := ctx.Value(testcase.key) - - if !reflect.DeepEqual(v, testcase.expected) { - t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) - } - } -} - -// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test -// RemoteAddr(). 
A fake RemoteAddr cannot be set on the HTTP request - it is overwritten -// at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header -// just contains the IP address, it is different enough for testing. -func TestRemoteAddr(t *testing.T) { - var expectedRemote string - backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - if r.RemoteAddr == expectedRemote { - t.Errorf("Unexpected matching remote addresses") - } - - actualRemote := RemoteAddr(r) - if expectedRemote != actualRemote { - t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) - } - - w.WriteHeader(200) - })) - - defer backend.Close() - backendURL, err := url.Parse(backend.URL) - if err != nil { - t.Fatal(err) - } - - proxy := httputil.NewSingleHostReverseProxy(backendURL) - frontend := httptest.NewServer(proxy) - defer frontend.Close() - - // X-Forwarded-For set by proxy - expectedRemote = "127.0.0.1" - proxyReq, err := http.NewRequest("GET", frontend.URL, nil) - if err != nil { - t.Fatal(err) - } - - _, err = http.DefaultClient.Do(proxyReq) - if err != nil { - t.Fatal(err) - } - - // RemoteAddr in X-Real-Ip - getReq, err := http.NewRequest("GET", backend.URL, nil) - if err != nil { - t.Fatal(err) - } - - expectedRemote = "1.2.3.4" - getReq.Header["X-Real-ip"] = []string{expectedRemote} - _, err = http.DefaultClient.Do(getReq) - if err != nil { - t.Fatal(err) - } - - // Valid X-Real-Ip and invalid X-Forwarded-For - getReq.Header["X-forwarded-for"] = []string{"1.2.3"} - _, err = http.DefaultClient.Do(getReq) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go index af4f1351..721964a8 100644 --- a/vendor/github.com/docker/distribution/context/trace.go +++ b/vendor/github.com/docker/distribution/context/trace.go @@ -10,7 +10,7 @@ import ( // WithTrace allocates a traced timing span in a new context. This allows a // caller to track the time between calling WithTrace and the returned done // function. When the done function is called, a log message is emitted with a -// "trace.duration" field, corresponding to the elapased time and a +// "trace.duration" field, corresponding to the elapsed time and a // "trace.func" field, corresponding to the function that called WithTrace. // // The logging keys "trace.id" and "trace.parent.id" are provided to implement diff --git a/vendor/github.com/docker/distribution/context/trace_test.go b/vendor/github.com/docker/distribution/context/trace_test.go deleted file mode 100644 index 4b969fbb..00000000 --- a/vendor/github.com/docker/distribution/context/trace_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package context - -import ( - "runtime" - "testing" - "time" -) - -// TestWithTrace ensures that tracing has the expected values in the context. -func TestWithTrace(t *testing.T) { - pc, file, _, _ := runtime.Caller(0) // get current caller. 
- f := runtime.FuncForPC(pc) - - base := []valueTestCase{ - { - key: "trace.id", - notnilorempty: true, - }, - - { - key: "trace.file", - expected: file, - notnilorempty: true, - }, - { - key: "trace.line", - notnilorempty: true, - }, - { - key: "trace.start", - notnilorempty: true, - }, - } - - ctx, done := WithTrace(Background()) - defer done("this will be emitted at end of test") - - checkContextForValues(t, ctx, append(base, valueTestCase{ - key: "trace.func", - expected: f.Name(), - })) - - traced := func() { - parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. - - pc, _, _, _ := runtime.Caller(0) // get current caller. - f := runtime.FuncForPC(pc) - ctx, done := WithTrace(ctx) - defer done("this should be subordinate to the other trace") - time.Sleep(time.Second) - checkContextForValues(t, ctx, append(base, valueTestCase{ - key: "trace.func", - expected: f.Name(), - }, valueTestCase{ - key: "trace.parent.id", - expected: parentID, - })) - } - traced() - - time.Sleep(time.Second) -} - -type valueTestCase struct { - key string - expected interface{} - notnilorempty bool // just check not empty/not nil -} - -func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { - - for _, testcase := range values { - v := ctx.Value(testcase.key) - if testcase.notnilorempty { - if v == nil || v == "" { - t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) - } - continue - } - - if v != testcase.expected { - t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) - } - } -} diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go index 299edc00..cb9ef52e 100644 --- a/vendor/github.com/docker/distribution/context/util.go +++ b/vendor/github.com/docker/distribution/context/util.go @@ -8,25 +8,17 @@ import ( // since that time. If the key is not found, the value returned will be zero. // This is helpful when inferring metrics related to context execution times. func Since(ctx Context, key interface{}) time.Duration { - startedAtI := ctx.Value(key) - if startedAtI != nil { - if startedAt, ok := startedAtI.(time.Time); ok { - return time.Since(startedAt) - } + if startedAt, ok := ctx.Value(key).(time.Time); ok { + return time.Since(startedAt) } - return 0 } // GetStringValue returns a string value from the context. The empty string // will be returned if not found. 
func GetStringValue(ctx Context, key interface{}) (value string) { - stringi := ctx.Value(key) - if stringi != nil { - if valuev, ok := stringi.(string); ok { - value = valuev - } + if valuev, ok := ctx.Value(key).(string); ok { + value = valuev } - return value } diff --git a/vendor/github.com/docker/distribution/context/version_test.go b/vendor/github.com/docker/distribution/context/version_test.go deleted file mode 100644 index b8165269..00000000 --- a/vendor/github.com/docker/distribution/context/version_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package context - -import "testing" - -func TestVersionContext(t *testing.T) { - ctx := Background() - - if GetVersion(ctx) != "" { - t.Fatalf("context should not yet have a version") - } - - expected := "2.1-whatever" - ctx = WithVersion(ctx, expected) - version := GetVersion(ctx) - - if version != expected { - t.Fatalf("version was not set: %q != %q", version, expected) - } -} diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh index 7ee751ab..25d419ae 100755 --- a/vendor/github.com/docker/distribution/coverpkg.sh +++ b/vendor/github.com/docker/distribution/coverpkg.sh @@ -3,5 +3,5 @@ # need to be passed to `go test -coverpkg`: this includes all of the # subpackage's dependencies within the containing package, as well as the # subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})" +DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digest/digest_test.go b/vendor/github.com/docker/distribution/digest/digest_test.go deleted file mode 100644 index afb4ebf6..00000000 --- a/vendor/github.com/docker/distribution/digest/digest_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package digest - -import ( - "testing" -) - -func TestParseDigest(t *testing.T) { - for _, testcase := range []struct { - input string - err error - algorithm Algorithm - hex string - }{ - { - input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - algorithm: "sha384", - hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - }, - { - // empty hex - input: "sha256:", - err: ErrDigestInvalidFormat, - }, - { - // just hex - input: "d41d8cd98f00b204e9800998ecf8427e", - err: ErrDigestInvalidFormat, - }, - { - // not hex - input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", - err: ErrDigestInvalidFormat, - }, - { - // too short - input: "sha256:abcdef0123456789", - err: ErrDigestInvalidLength, - }, - { - // too short (from different algorithm) - input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - err: ErrDigestInvalidLength, - }, - { - input: "foo:d41d8cd98f00b204e9800998ecf8427e", - err: ErrDigestUnsupported, - }, - } { - digest, err := ParseDigest(testcase.input) - if err != testcase.err { - t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) - } - - if testcase.err != nil { - continue - } - - if digest.Algorithm() != testcase.algorithm { - t.Fatalf("incorrect algorithm for parsed digest: %q != %q", 
digest.Algorithm(), testcase.algorithm) - } - - if digest.Hex() != testcase.hex { - t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex) - } - - // Parse string return value and check equality - newParsed, err := ParseDigest(digest.String()) - - if err != nil { - t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) - } - - if newParsed != digest { - t.Fatalf("expected equal: %q != %q", newParsed, digest) - } - } -} diff --git a/vendor/github.com/docker/distribution/digest/digester_resumable_test.go b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go deleted file mode 100644 index 6ba21c80..00000000 --- a/vendor/github.com/docker/distribution/digest/digester_resumable_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !noresumabledigest - -package digest - -import ( - "testing" - - "github.com/stevvooe/resumable" - _ "github.com/stevvooe/resumable/sha256" -) - -// TestResumableDetection just ensures that the resumable capability of a hash -// is exposed through the digester type, which is just a hash plus a Digest -// method. -func TestResumableDetection(t *testing.T) { - d := Canonical.New() - - if _, ok := d.Hash().(resumable.Hash); !ok { - t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) - } -} diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go index 3fac41b4..4b9313c1 100644 --- a/vendor/github.com/docker/distribution/digest/set.go +++ b/vendor/github.com/docker/distribution/digest/set.go @@ -22,7 +22,7 @@ var ( // may be easily referenced by a string // representation of the digest as well as short representation. The uniqueness of the short representation is based on other -// digests in the set. If digests are ommited from this set, +// digests in the set. If digests are omitted from this set, // collisions in a larger set may not be detected, therefore it // is important to always do short representation lookups on // the complete set of digests.
To mitigate collisions, an diff --git a/vendor/github.com/docker/distribution/digest/set_test.go b/vendor/github.com/docker/distribution/digest/set_test.go deleted file mode 100644 index e9dab879..00000000 --- a/vendor/github.com/docker/distribution/digest/set_test.go +++ /dev/null @@ -1,368 +0,0 @@ -package digest - -import ( - "crypto/sha256" - "encoding/binary" - "math/rand" - "testing" -) - -func assertEqualDigests(t *testing.T, d1, d2 Digest) { - if d1 != d2 { - t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) - } -} - -func TestLookup(t *testing.T) { - digests := []Digest{ - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha256:6432111111111111111111111111111111111111111111111111111111111111", - "sha256:6542111111111111111111111111111111111111111111111111111111111111", - "sha256:6532111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dgst, err := dset.Lookup("54") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[3]) - - dgst, err = dset.Lookup("1234") - if err == nil { - t.Fatal("Expected ambiguous error looking up: 1234") - } - if err != ErrDigestAmbiguous { - t.Fatal(err) - } - - dgst, err = dset.Lookup("9876") - if err == nil { - t.Fatal("Expected ambiguous error looking up: 9876") - } - if err != ErrDigestNotFound { - t.Fatal(err) - } - - dgst, err = dset.Lookup("sha256:1234") - if err == nil { - t.Fatal("Expected ambiguous error looking up: sha256:1234") - } - if err != ErrDigestAmbiguous { - t.Fatal(err) - } - - dgst, err = dset.Lookup("sha256:12345") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[0]) - - dgst, err = dset.Lookup("sha256:12346") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[2]) - - dgst, err = dset.Lookup("12346") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[2]) - - dgst, err = dset.Lookup("12345") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[0]) -} - -func TestAddDuplication(t *testing.T) { - digests := []Digest{ - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - if len(dset.entries) != 8 { - t.Fatal("Invalid dset size") - } - - if err := 
dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { - t.Fatal(err) - } - - if len(dset.entries) != 8 { - t.Fatal("Duplicate digest insert allowed") - } - - if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { - t.Fatal(err) - } - - if len(dset.entries) != 9 { - t.Fatal("Insert with different algorithm not allowed") - } -} - -func TestRemove(t *testing.T) { - digests, err := createDigests(10) - if err != nil { - t.Fatal(err) - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dgst, err := dset.Lookup(digests[0].String()) - if err != nil { - t.Fatal(err) - } - if dgst != digests[0] { - t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) - } - - if err := dset.Remove(digests[0]); err != nil { - t.Fatal(err) - } - - if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { - t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) - } -} - -func TestAll(t *testing.T) { - digests, err := createDigests(100) - if err != nil { - t.Fatal(err) - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - all := map[Digest]struct{}{} - for _, dgst := range dset.All() { - all[dgst] = struct{}{} - } - - if len(all) != len(digests) { - t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) - } - - for i, dgst := range digests { - if _, ok := all[dgst]; !ok { - t.Fatalf("Missing element at position %d: %s", i, dgst) - } - } - -} - -func assertEqualShort(t *testing.T, actual, expected string) { - if actual != expected { - t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) - } -} - -func TestShortCodeTable(t *testing.T) { - digests := []Digest{ - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha256:6432111111111111111111111111111111111111111111111111111111111111", - "sha256:6542111111111111111111111111111111111111111111111111111111111111", - "sha256:6532111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dump := ShortCodeTable(dset, 2) - - if len(dump) < len(digests) { - t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) - } - assertEqualShort(t, dump[digests[0]], "12341") - assertEqualShort(t, dump[digests[1]], "12345") - assertEqualShort(t, dump[digests[2]], "12346") - assertEqualShort(t, dump[digests[3]], "54") - assertEqualShort(t, dump[digests[4]], "6543") - assertEqualShort(t, dump[digests[5]], "64") - assertEqualShort(t, dump[digests[6]], "6542") - assertEqualShort(t, dump[digests[7]], "653") -} - -func createDigests(count int) ([]Digest, error) { - r := rand.New(rand.NewSource(25823)) - digests := make([]Digest, count) - for i := range digests { - h := sha256.New() - if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { - return nil, err - 
} - digests[i] = NewDigest("sha256", h) - } - return digests, nil -} - -func benchAddNTable(b *testing.B, n int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for j := range digests { - if err = dset.Add(digests[j]); err != nil { - b.Fatal(err) - } - } - } -} - -func benchLookupNTable(b *testing.B, n int, shortLen int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - b.Fatal(err) - } - } - shorts := make([]string, 0, n) - for _, short := range ShortCodeTable(dset, shortLen) { - shorts = append(shorts, short) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err = dset.Lookup(shorts[i%n]); err != nil { - b.Fatal(err) - } - } -} - -func benchRemoveNTable(b *testing.B, n int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - b.StopTimer() - for j := range digests { - if err = dset.Add(digests[j]); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for j := range digests { - if err = dset.Remove(digests[j]); err != nil { - b.Fatal(err) - } - } - } -} - -func benchShortCodeNTable(b *testing.B, n int, shortLen int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - b.Fatal(err) - } - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - ShortCodeTable(dset, shortLen) - } -} - -func BenchmarkAdd10(b *testing.B) { - benchAddNTable(b, 10) -} - -func BenchmarkAdd100(b *testing.B) { - benchAddNTable(b, 100) -} - -func BenchmarkAdd1000(b *testing.B) { - benchAddNTable(b, 1000) -} - -func BenchmarkRemove10(b *testing.B) { - benchRemoveNTable(b, 10) -} - -func BenchmarkRemove100(b *testing.B) { - benchRemoveNTable(b, 100) -} - -func BenchmarkRemove1000(b *testing.B) { - benchRemoveNTable(b, 1000) -} - -func BenchmarkLookup10(b *testing.B) { - benchLookupNTable(b, 10, 12) -} - -func BenchmarkLookup100(b *testing.B) { - benchLookupNTable(b, 100, 12) -} - -func BenchmarkLookup1000(b *testing.B) { - benchLookupNTable(b, 1000, 12) -} - -func BenchmarkShortCode10(b *testing.B) { - benchShortCodeNTable(b, 10, 12) -} -func BenchmarkShortCode100(b *testing.B) { - benchShortCodeNTable(b, 100, 12) -} -func BenchmarkShortCode1000(b *testing.B) { - benchShortCodeNTable(b, 1000, 12) -} diff --git a/vendor/github.com/docker/distribution/digest/verifiers_test.go b/vendor/github.com/docker/distribution/digest/verifiers_test.go deleted file mode 100644 index c342d6e7..00000000 --- a/vendor/github.com/docker/distribution/digest/verifiers_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package digest - -import ( - "bytes" - "crypto/rand" - "io" - "testing" -) - -func TestDigestVerifier(t *testing.T) { - p := make([]byte, 1<<20) - rand.Read(p) - digest := FromBytes(p) - - verifier, err := NewDigestVerifier(digest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, bytes.NewReader(p)) - - if !verifier.Verified() { - t.Fatalf("bytes not verified") - } -} - -// TestVerifierUnsupportedDigest ensures that unsupported digest validation is -// flowing through 
verifier creation. -func TestVerifierUnsupportedDigest(t *testing.T) { - unsupported := Digest("bean:0123456789abcdef") - - _, err := NewDigestVerifier(unsupported) - if err == nil { - t.Fatalf("expected error when creating verifier") - } - - if err != ErrDigestUnsupported { - t.Fatalf("incorrect error for unsupported digest: %v", err) - } -} - -// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for -// DigestVerifier. -// -// The relevant benchmark for comparison can be run with the following -// commands: -// -// go test -bench . crypto/sha1 -// diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go index 77bd096e..c20f2811 100644 --- a/vendor/github.com/docker/distribution/errors.go +++ b/vendor/github.com/docker/distribution/errors.go @@ -8,6 +8,10 @@ import ( "github.com/docker/distribution/digest" ) +// ErrAccessDenied is returned when access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + // ErrManifestNotModified is returned when a conditional manifest GetByTag // returns nil due to the client indicating it has the latest version var ErrManifestNotModified = errors.New("manifest not modified") diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 00000000..88367b0a --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 00000000..a2082ec0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// package's version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`.
+ OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Manifests references the platform-specific image manifests. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier.
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 00000000..b3d1e554 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,281 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, errors.New("number of descriptors and number of layers in rootfs must match") + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + 
blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + // todo: verification here? + mb.descriptors = append(mb.descriptors, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go new file mode 100644 index 00000000..bff47bde --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version.
+	// that for schema version 1, the media type is optionally "application/json".
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+	// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
+	MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	// MediaTypeManifestLayer specifies the media type for manifest layers
+	MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
+)
+
+var (
+	// SchemaVersion provides a pre-initialized version structure for this
+	// package's version of the manifest.
+	SchemaVersion = manifest.Versioned{
+		SchemaVersion: 1,
+	}
+)
+
+func init() {
+	schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		sm := new(SignedManifest)
+		err := sm.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		desc := distribution.Descriptor{
+			Digest:    digest.FromBytes(sm.Canonical),
+			Size:      int64(len(sm.Canonical)),
+			MediaType: MediaTypeSignedManifest,
+		}
+		return sm, desc, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("application/json", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// FSLayer is a container struct for BlobSums defined in an image manifest
+type FSLayer struct {
+	// BlobSum is the tarsum of the referenced filesystem image layer
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// History stores unstructured v1 compatibility information
+type History struct {
+	// V1Compatibility is the raw v1 compatibility information
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Manifest provides the base accessible fields for working with V2 image
+// format in the registry.
+type Manifest struct {
+	manifest.Versioned
+
+	// Name is the name of the image's repository
+	Name string `json:"name"`
+
+	// Tag is the tag of the image specified by this manifest
+	Tag string `json:"tag"`
+
+	// Architecture is the host architecture on which this image is intended to
+	// run
+	Architecture string `json:"architecture"`
+
+	// FSLayers is a list of filesystem layer blobSums contained in this image
+	FSLayers []FSLayer `json:"fsLayers"`
+
+	// History is a list of unstructured historical data for v1 compatibility
+	History []History `json:"history"`
+}
+
+// SignedManifest provides an envelope for a signed image manifest, including
+// the format sensitive raw bytes.
+type SignedManifest struct {
+	Manifest
+
+	// Canonical is the canonical byte representation of the ImageManifest,
+	// without any attached signatures. The manifest byte
+	// representation cannot change or it will have to be re-signed.
+	Canonical []byte `json:"-"`
+
+	// all contains the byte representation of the Manifest including signatures
+	// and is returned by Payload()
+	all []byte
+}
+
+// UnmarshalJSON populates a new SignedManifest struct from JSON data.
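+// The input must be a pretty-signed JWS document carrying a "signatures"
+// block, as produced by Sign; plain unsigned manifest JSON will fail to
+// parse here.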
+func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
+	sm.all = make([]byte, len(b), len(b))
+	// store manifest and signatures in all
+	copy(sm.all, b)
+
+	jsig, err := libtrust.ParsePrettySignature(b, "signatures")
+	if err != nil {
+		return err
+	}
+
+	// Resolve the payload in the manifest.
+	bytes, err := jsig.Payload()
+	if err != nil {
+		return err
+	}
+
+	// sm.Canonical stores the canonical manifest JSON
+	sm.Canonical = make([]byte, len(bytes), len(bytes))
+	copy(sm.Canonical, bytes)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
+		return err
+	}
+
+	sm.Manifest = manifest
+
+	return nil
+}
+
+// References returns the descriptors of this manifest's references
+func (sm SignedManifest) References() []distribution.Descriptor {
+	dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
+	for i, fsLayer := range sm.FSLayers {
+		dependencies[i] = distribution.Descriptor{
+			MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+			Digest:    fsLayer.BlobSum,
+		}
+	}
+
+	return dependencies
+
+}
+
+// MarshalJSON returns the contents of all. If all is empty, marshals the
+// inner contents. Applications requiring a marshaled signed manifest should
+// use Payload directly, since the content produced by json.Marshal will be
+// compacted and will fail signature checks.
+func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
+	if len(sm.all) > 0 {
+		return sm.all, nil
+	}
+
+	// If the raw data is not available, just dump the inner content.
+	return json.Marshal(&sm.Manifest)
+}
+
+// Payload returns the signed content of the signed manifest.
+func (sm SignedManifest) Payload() (string, []byte, error) {
+	return MediaTypeSignedManifest, sm.all, nil
+}
+
+// Signatures returns the signatures as provided by
+// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
+// signatures.
+func (sm *SignedManifest) Signatures() ([][]byte, error) {
+	jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the payload in the manifest.
+	return jsig.Signatures()
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
new file mode 100644
index 00000000..fc1045f9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
@@ -0,0 +1,98 @@
+package schema1
+
+import (
+	"fmt"
+
+	"errors"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/libtrust"
+)
+
+// referenceManifestBuilder is a type for constructing manifests from schema1
+// dependencies.
+type referenceManifestBuilder struct {
+	Manifest
+	pk libtrust.PrivateKey
+}
+
+// NewReferenceManifestBuilder is used to build new manifests for the current
+// schema version using schema1 dependencies.
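+//
+// A minimal usage sketch (illustrative only; "layerDigest" and "ctx" are
+// assumed to exist in the caller):
+//
+//	pk, _ := libtrust.GenerateECP256PrivateKey()
+//	named, _ := reference.WithName("example.com/app")
+//	b := NewReferenceManifestBuilder(pk, named, "amd64")
+//	_ = b.AppendReference(Reference{Digest: layerDigest, History: History{V1Compatibility: "{}"}})
+//	sm, err := b.Build(ctx)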
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
+	tag := ""
+	if tagged, isTagged := ref.(reference.Tagged); isTagged {
+		tag = tagged.Tag()
+	}
+
+	return &referenceManifestBuilder{
+		Manifest: Manifest{
+			Versioned: manifest.Versioned{
+				SchemaVersion: 1,
+			},
+			Name:         ref.Name(),
+			Tag:          tag,
+			Architecture: architecture,
+		},
+		pk: pk,
+	}
+}
+
+func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
+	m := mb.Manifest
+	if len(m.FSLayers) == 0 {
+		return nil, errors.New("cannot build manifest with zero layers or history")
+	}
+
+	m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
+	m.History = make([]History, len(mb.Manifest.History))
+	copy(m.FSLayers, mb.Manifest.FSLayers)
+	copy(m.History, mb.Manifest.History)
+
+	return Sign(&m, mb.pk)
+}
+
+// AppendReference adds a reference to the current ManifestBuilder
+func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
+	r, ok := d.(Reference)
+	if !ok {
+		return fmt.Errorf("Unable to add non-reference type to v1 builder")
+	}
+
+	// Entries need to be prepended
+	mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
+	mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
+	return nil
+
+}
+
+// References returns the current references added to this builder
+func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
+	refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
+	for i := range mb.Manifest.FSLayers {
+		layerDigest := mb.Manifest.FSLayers[i].BlobSum
+		history := mb.Manifest.History[i]
+		ref := Reference{layerDigest, 0, history}
+		refs[i] = ref.Descriptor()
+	}
+	return refs
+}
+
+// Reference describes a manifest v2, schema version 1 dependency:
+// an FSLayer associated with a history entry.
+type Reference struct {
+	Digest  digest.Digest
+	Size    int64 // if we know it, set it for the descriptor.
+	History History
+}
+
+// Descriptor describes a reference
+func (r Reference) Descriptor() distribution.Descriptor {
+	return distribution.Descriptor{
+		MediaType: MediaTypeManifestLayer,
+		Digest:    r.Digest,
+		Size:      r.Size,
+	}
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go
new file mode 100644
index 00000000..c862dd81
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema1/sign.go
@@ -0,0 +1,68 @@
+package schema1
+
+import (
+	"crypto/x509"
+	"encoding/json"
+
+	"github.com/docker/libtrust"
+)
+
+// Sign signs the manifest with the provided private key, returning a
+// SignedManifest. This typically won't be used within the registry, except
+// for testing.
+func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
+	p, err := json.MarshalIndent(m, "", " ")
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := libtrust.NewJSONSignature(p)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := js.Sign(pk); err != nil {
+		return nil, err
+	}
+
+	pretty, err := js.PrettySignature("signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return &SignedManifest{
+		Manifest:  *m,
+		all:       pretty,
+		Canonical: p,
+	}, nil
+}
+
+// SignWithChain signs the manifest with the given private key and x509 chain.
+// The public key of the first element in the chain must be the public key
+// corresponding to the signing key.
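+//
+// Illustrative sketch (assumes the key and certificate chain have already
+// been loaded, e.g. with libtrust.LoadKeyFile and x509 parsing):
+//
+//	sm, err := SignWithChain(&m, key, chain)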
+func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
+	p, err := json.MarshalIndent(m, "", " ")
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := libtrust.NewJSONSignature(p)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := js.SignWithChain(key, chain); err != nil {
+		return nil, err
+	}
+
+	pretty, err := js.PrettySignature("signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return &SignedManifest{
+		Manifest:  *m,
+		all:       pretty,
+		Canonical: p,
+	}, nil
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go
new file mode 100644
index 00000000..fa8daa56
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema1/verify.go
@@ -0,0 +1,32 @@
+package schema1
+
+import (
+	"crypto/x509"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/libtrust"
+)
+
+// Verify verifies the signature of the signed manifest returning the public
+// keys used during signing.
+func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
+		return nil, err
+	}
+
+	return js.Verify()
+}
+
+// VerifyChains verifies the signature of the signed manifest against the
+// certificate pool returning the list of verified chains. Signatures without
+// an x509 chain are not checked.
+func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return js.VerifyChains(ca)
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go
new file mode 100644
index 00000000..44b94eaa
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go
@@ -0,0 +1,77 @@
+package schema2
+
+import (
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+// builder is a type for constructing manifests.
+type builder struct {
+	// bs is a BlobService used to publish the configuration blob.
+	bs distribution.BlobService
+
+	// configJSON is the configuration blob content to publish.
+	configJSON []byte
+
+	// layers is a list of layer descriptors that gets built by successive
+	// calls to AppendReference.
+	layers []distribution.Descriptor
+}
+
+// NewManifestBuilder is used to build new manifests for the current schema
+// version. It takes a BlobService so it can publish the configuration blob
+// as part of the Build process.
+func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
+	mb := &builder{
+		bs:         bs,
+		configJSON: make([]byte, len(configJSON)),
+	}
+	copy(mb.configJSON, configJSON)
+
+	return mb
+}
+
+// Build produces a final manifest from the given references.
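+// It stats the configuration blob first so that an already-uploaded config
+// is reused, and only falls back to Put when the blob store reports
+// distribution.ErrBlobUnknown.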
+func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
+	m := Manifest{
+		Versioned: SchemaVersion,
+		Layers:    make([]distribution.Descriptor, len(mb.layers)),
+	}
+	copy(m.Layers, mb.layers)
+
+	configDigest := digest.FromBytes(mb.configJSON)
+
+	var err error
+	m.Config, err = mb.bs.Stat(ctx, configDigest)
+	switch err {
+	case nil:
+		return FromStruct(m)
+	case distribution.ErrBlobUnknown:
+		// nop
+	default:
+		return nil, err
+	}
+
+	// Add config to the blob store
+	m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
+	// Override MediaType, since Put always replaces the specified media
+	// type with application/octet-stream in the descriptor it returns.
+	m.Config.MediaType = MediaTypeConfig
+	if err != nil {
+		return nil, err
+	}
+
+	return FromStruct(m)
+}
+
+// AppendReference adds a reference to the current ManifestBuilder.
+func (mb *builder) AppendReference(d distribution.Describable) error {
+	mb.layers = append(mb.layers, d.Descriptor())
+	return nil
+}
+
+// References returns the current references added to this builder.
+func (mb *builder) References() []distribution.Descriptor {
+	return mb.layers
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
new file mode 100644
index 00000000..8d378e99
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
@@ -0,0 +1,125 @@
+package schema2
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+)
+
+const (
+	// MediaTypeManifest specifies the mediaType for the current version.
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
+
+	// MediaTypeConfig specifies the mediaType for the image configuration.
+	MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
+
+	// MediaTypeLayer is the mediaType used for layers referenced by the
+	// manifest.
+	MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+)
+
+var (
+	// SchemaVersion provides a pre-initialized version structure for this
+	// package's version of the manifest.
+	SchemaVersion = manifest.Versioned{
+		SchemaVersion: 2,
+		MediaType:     MediaTypeManifest,
+	}
+)
+
+func init() {
+	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		m := new(DeserializedManifest)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// Manifest defines a schema2 manifest.
+type Manifest struct {
+	manifest.Versioned
+
+	// Config references the image configuration as a blob.
+	Config distribution.Descriptor `json:"config"`
+
+	// Layers lists descriptors for the layers referenced by the
+	// configuration.
+	Layers []distribution.Descriptor `json:"layers"`
+}
+
+// References returns the descriptors of this manifest's references.
+func (m Manifest) References() []distribution.Descriptor {
+	return m.Layers
+
+}
+
+// Target returns the target of this manifest.
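+// For schema2 the target is the image configuration descriptor held in the
+// manifest's Config field.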
+func (m Manifest) Target() distribution.Descriptor {
+	return m.Config
+}
+
+// DeserializedManifest wraps Manifest with a copy of the original JSON.
+// It satisfies the distribution.Manifest interface.
+type DeserializedManifest struct {
+	Manifest
+
+	// canonical is the canonical byte representation of the Manifest.
+	canonical []byte
+}
+
+// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
+// DeserializedManifest which contains the manifest and its JSON representation.
+func FromStruct(m Manifest) (*DeserializedManifest, error) {
+	var deserialized DeserializedManifest
+	deserialized.Manifest = m
+
+	var err error
+	deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
+	return &deserialized, err
+}
+
+// UnmarshalJSON populates a new Manifest struct from JSON data.
+func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b), len(b))
+	// store manifest in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(m.canonical, &manifest); err != nil {
+		return err
+	}
+
+	m.Manifest = manifest
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// marshals the inner contents.
+func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
+}
+
+// Payload returns the raw content of the manifest. The contents can be used to
+// calculate the content identifier.
+func (m DeserializedManifest) Payload() (string, []byte, error) {
+	return m.MediaType, m.canonical, nil
+}
diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go
new file mode 100644
index 00000000..c57398bd
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/versioned.go
@@ -0,0 +1,12 @@
+package manifest
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type Versioned struct {
+	// SchemaVersion is the image manifest schema that this image follows
+	SchemaVersion int `json:"schemaVersion"`
+
+	// MediaType is the media type of this schema.
+	MediaType string `json:"mediaType,omitempty"`
+}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
index 1acb0500..3bf912a6 100644
--- a/vendor/github.com/docker/distribution/manifests.go
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -53,12 +53,18 @@ type ManifestService interface {
 	// Delete removes the manifest specified by the given digest. Deleting
 	// a manifest that doesn't exist will return ErrManifestNotFound
 	Delete(ctx context.Context, dgst digest.Digest) error
+}
 
-	// Enumerate fills 'manifests' with the manifests in this service up
-	// to the size of 'manifests' and returns 'n' for the number of entries
-	// which were filled. 'last' contains an offset in the manifest set
-	// and can be used to resume iteration.
-	//Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error)
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+	// Enumerate calls ingester for each manifest.
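+	//
+	// A typical caller accumulates results through the closure
+	// (illustrative):
+	//
+	//	var all []digest.Digest
+	//	err := enumerator.Enumerate(ctx, func(dgst digest.Digest) error {
+	//		all = append(all, dgst)
+	//		return nil
+	//	})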
+ Enumerate(ctx context.Context, ingester func(digest.Digest) error) error +} + +// SignaturesGetter provides an interface for getting the signatures of a schema1 manifest. If the digest +// referred to is not a schema1 manifest, an error should be returned. +type SignaturesGetter interface { + GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) } // Describable is an interface for descriptors @@ -81,7 +87,7 @@ type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) var mappings = make(map[string]UnmarshalFunc, 0) -// UnmarshalManifest looks up manifest unmarshall functions based on +// UnmarshalManifest looks up manifest unmarshal functions based on // MediaType func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { // Need to look up by the actual media type, not the raw contents of diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index c188472a..bb09fa25 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -3,10 +3,10 @@ // // Grammar // -// reference := repository [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [hostname '/'] component ['/' component]* // hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go deleted file mode 100644 index cde1a7a2..00000000 --- a/vendor/github.com/docker/distribution/reference/reference_test.go +++ /dev/null @@ -1,535 +0,0 @@ -package reference - -import ( - "encoding/json" - "strconv" - "strings" - "testing" - - "github.com/docker/distribution/digest" -) - -func TestReferenceParse(t *testing.T) { - // referenceTestcases is a unified set of testcases for - // testing the parsing of references - referenceTestcases := []struct { - // input is the repository name or name component testcase - input string - // err is the error expected from Parse, or nil - err error - // repository is the string representation for the reference - repository string - // hostname is the hostname expected in the reference - hostname string - // tag is the tag for the reference - tag string - // digest is the digest for the reference (enforces digest reference) - digest string - }{ - { - input: "test_com", - repository: "test_com", - }, - { - input: "test.com:tag", - repository: "test.com", - tag: "tag", - }, - { - input: "test.com:5000", - repository: "test.com", - tag: "5000", - }, - { - input: "test.com/repo:tag", - hostname: "test.com", - repository: "test.com/repo", - tag: "tag", - }, - { - input: "test:5000/repo", - hostname: "test:5000", - repository: "test:5000/repo", - }, - { - input: "test:5000/repo:tag", - hostname: "test:5000", - repository: "test:5000/repo", - tag: "tag", - }, - { - input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - hostname: "test:5000", - repository: "test:5000/repo", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: 
"test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - hostname: "test:5000", - repository: "test:5000/repo", - tag: "tag", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "test:5000/repo", - hostname: "test:5000", - repository: "test:5000/repo", - }, - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "repo@sha256:ffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestInvalidLength, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestUnsupported, - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", - hostname: "a", - repository: strings.Repeat("a/", 127) + "a", - tag: "tag-puts-this-over-max", - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - hostname: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - }, - { - input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", - hostname: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - tag: "some-long-tag", - }, - { - input: "b.gcr.io/test.example.com/my-app:test.example.com", - hostname: "b.gcr.io", - repository: "b.gcr.io/test.example.com/my-app", - tag: "test.example.com", - }, - { - input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode - hostname: "xn--n3h.com", - repository: "xn--n3h.com/myimage", - tag: "xn--n3h.com", - }, - { - input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode - hostname: "xn--7o8h.com", - repository: "xn--7o8h.com/myimage", - tag: "xn--7o8h.com", - digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "foo_bar.com:8080", - repository: "foo_bar.com", - tag: "8080", - }, - { - input: "foo/foo_bar.com:8080", - hostname: "foo", - repository: "foo/foo_bar.com", - tag: "8080", - }, - } - for _, testcase := range referenceTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - repo, err := Parse(testcase.input) - if testcase.err != nil { - if err == nil { - failf("missing expected error: %v", testcase.err) - } else if testcase.err != err { - failf("mismatched error: got %v, expected %v", err, testcase.err) - } - continue - } else if err != nil { - failf("unexpected parse error: %v", err) - continue - } - if repo.String() != testcase.input { - failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) - } - - if named, ok := repo.(Named); ok { - if named.Name() != testcase.repository { - failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) - } - hostname, _ := SplitHostname(named) - if hostname != testcase.hostname { - failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) - } - } else if testcase.repository != "" || testcase.hostname != "" { - failf("expected named type, got %T", repo) - } - - tagged, ok := repo.(Tagged) - if testcase.tag != "" { - if ok { - if tagged.Tag() != testcase.tag { - failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) - } - } else { - failf("expected tagged type, got %T", repo) - } - } else if ok { - failf("unexpected tagged type") - } - - digested, ok := repo.(Digested) - if testcase.digest != "" { - if ok { - if digested.Digest().String() != testcase.digest { - failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) - } - } else { - failf("expected digested type, got %T", repo) - } - } else if ok { - failf("unexpected digested type") - } - - } -} - -// TestWithNameFailure tests cases where WithName should fail. Cases where it -// should succeed are covered by TestSplitHostname, below. -func TestWithNameFailure(t *testing.T) { - testcases := []struct { - input string - err error - }{ - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - _, err := WithName(testcase.input) - if err == nil { - failf("no error parsing name. expected: %s", testcase.err) - } - } -} - -func TestSplitHostname(t *testing.T) { - testcases := []struct { - input string - hostname string - name string - }{ - { - input: "test.com/foo", - hostname: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - hostname: "", - name: "test_com/foo", - }, - { - input: "test:8080/foo", - hostname: "test:8080", - name: "foo", - }, - { - input: "test.com:8080/foo", - hostname: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - hostname: "test-com:8080", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - hostname: "xn--n3h.com:18080", - name: "foo", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - named, err := WithName(testcase.input) - if err != nil { - failf("error parsing name: %s", err) - } - hostname, name := SplitHostname(named) - if hostname != testcase.hostname { - failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} - -type serializationType struct { - Description string - Field Field -} - -func TestSerialization(t *testing.T) { - testcases := []struct { - description string - input string - name string - tag string - digest string - err error - }{ - { - description: "empty value", - err: ErrNameEmpty, - }, - { - description: "just a name", - input: "example.com:8000/named", - name: "example.com:8000/named", - }, - { - description: "name with a tag", - input: "example.com:8000/named:tagged", - name: "example.com:8000/named", - tag: "tagged", - }, - { - description: "name with digest", - input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", - name: "other.com/named", - digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - m := map[string]string{ - "Description": testcase.description, - "Field": testcase.input, - } - b, err := json.Marshal(m) - if err != nil { - failf("error marshalling: %v", err) - } - t := serializationType{} - - if err := json.Unmarshal(b, &t); err != nil { - if testcase.err == nil { - failf("error unmarshalling: %v", err) - } - if err != testcase.err { - failf("wrong error, expected %v, got %v", testcase.err, err) - } - - continue - } else if testcase.err != nil { - failf("expected error unmarshalling: %v", testcase.err) - } - - if t.Description != testcase.description { - failf("wrong description, expected %q, got %q", testcase.description, t.Description) - } - - ref := t.Field.Reference() - - if named, ok := ref.(Named); ok { - if named.Name() != testcase.name { - failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) - } - } else if testcase.name != "" { - failf("expected named type, got %T", ref) - } - - tagged, ok := ref.(Tagged) - if testcase.tag != "" { - if ok { - if tagged.Tag() != testcase.tag { - failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) - } - } else { - failf("expected tagged type, got %T", ref) - } - } else if ok { - failf("unexpected tagged type") - } - - digested, ok := ref.(Digested) - if testcase.digest != "" { - if ok { - if digested.Digest().String() != testcase.digest { - failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) - } - } else { - failf("expected digested type, got %T", ref) - } - } else if ok { - failf("unexpected digested type") - } - - t = serializationType{ - Description: testcase.description, - Field: AsField(ref), - } - - b2, err := json.Marshal(t) - if err != nil { - failf("error marshing serialization type: %v", err) - } - - if string(b) != string(b2) { - failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) - } - - // Ensure t.Field is not implementing "Reference" directly, getting - // around the Reference type system - var fieldInterface interface{} = t.Field - if _, ok := fieldInterface.(Reference); ok { - failf("field should not implement Reference interface") - } - - } -} - -func 
TestWithTag(t *testing.T) { - testcases := []struct { - name string - tag string - combined string - }{ - { - name: "test.com/foo", - tag: "tag", - combined: "test.com/foo:tag", - }, - { - name: "foo", - tag: "tag2", - combined: "foo:tag2", - }, - { - name: "test.com:8000/foo", - tag: "tag4", - combined: "test.com:8000/foo:tag4", - }, - { - name: "test.com:8000/foo", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) - t.Fail() - } - - named, err := WithName(testcase.name) - if err != nil { - failf("error parsing name: %s", err) - } - tagged, err := WithTag(named, testcase.tag) - if err != nil { - failf("WithTag failed: %s", err) - } - if tagged.String() != testcase.combined { - failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) - } - } -} - -func TestWithDigest(t *testing.T) { - testcases := []struct { - name string - digest digest.Digest - combined string - }{ - { - name: "test.com/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com/foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) - t.Fail() - } - - named, err := WithName(testcase.name) - if err != nil { - failf("error parsing name: %s", err) - } - digested, err := WithDigest(named, testcase.digest) - if err != nil { - failf("WithDigest failed: %s", err) - } - if digested.String() != testcase.combined { - failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) - } - } -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go index a4ffe5b6..9a7d366b 100644 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -22,7 +22,7 @@ var ( // hostnameComponentRegexp restricts the registry hostname component of a // repository name to start with a component as defined by hostnameRegexp // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) // hostnameRegexp defines the structure of potential hostname components // that may be part of image names. This is purposely a subset of what is @@ -49,7 +49,7 @@ var ( // NameRegexp is the format for the name component of references. The // regexp has capturing groups for the hostname and name part omitting - // the seperating forward slash from either. + // the separating forward slash from either. 
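+	// For example, "registry.example.com:5000/team/app" is expected to yield
+	// hostname "registry.example.com:5000" and name "team/app" as the two
+	// capturing groups (illustrative).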
NameRegexp = expression( optional(hostnameRegexp, literal(`/`)), nameComponentRegexp, diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go deleted file mode 100644 index 33944918..00000000 --- a/vendor/github.com/docker/distribution/reference/regexp_test.go +++ /dev/null @@ -1,477 +0,0 @@ -package reference - -import ( - "regexp" - "strings" - "testing" -) - -type regexpMatch struct { - input string - match bool - subs []string -} - -func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { - matches := r.FindStringSubmatch(m.input) - if m.match && matches != nil { - if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { - t.Fatalf("Bad match result %#v for %q", matches, m.input) - } - if len(matches) < (len(m.subs) + 1) { - t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) - } - for i := range m.subs { - if m.subs[i] != matches[i+1] { - t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) - } - } - } else if m.match { - t.Errorf("Expected match for %q", m.input) - } else if matches != nil { - t.Errorf("Unexpected match for %q", m.input) - } -} - -func TestHostRegexp(t *testing.T) { - hostcases := []regexpMatch{ - { - input: "test.com", - match: true, - }, - { - input: "test.com:10304", - match: true, - }, - { - input: "test.com:http", - match: false, - }, - { - input: "localhost", - match: true, - }, - { - input: "localhost:8080", - match: true, - }, - { - input: "a", - match: true, - }, - { - input: "a.b", - match: true, - }, - { - input: "ab.cd.com", - match: true, - }, - { - input: "a-b.com", - match: true, - }, - { - input: "-ab.com", - match: false, - }, - { - input: "ab-.com", - match: false, - }, - { - input: "ab.c-om", - match: true, - }, - { - input: "ab.-com", - match: false, - }, - { - input: "ab.com-", - match: false, - }, - { - input: "0101.com", - match: true, // TODO(dmcgowan): valid if this should be allowed - }, - { - input: "001a.com", - match: true, - }, - { - input: "b.gbc.io:443", - match: true, - }, - { - input: "b.gbc.io", - match: true, - }, - { - input: "xn--n3h.com", // ☃.com in punycode - match: true, - }, - } - r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) - for i := range hostcases { - checkRegexp(t, r, hostcases[i]) - } -} - -func TestFullNameRegexp(t *testing.T) { - if anchoredNameRegexp.NumSubexp() != 2 { - t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", - anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "", - match: false, - }, - { - input: "short", - match: true, - subs: []string{"", "short"}, - }, - { - input: "simple/name", - match: true, - subs: []string{"simple", "name"}, - }, - { - input: "library/ubuntu", - match: true, - subs: []string{"library", "ubuntu"}, - }, - { - input: "docker/stevvooe/app", - match: true, - subs: []string{"docker", "stevvooe/app"}, - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, - }, - { - input: "aa/aa/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/bb/bb/bb"}, - }, - { - input: "a/a/a/a", - match: true, - subs: []string{"a", "a/a/a"}, - }, - { - input: "a/a/a/a/", - match: false, - }, - { - input: "a//a/a", - match: false, - }, - { - input: "a", - match: true, - subs: []string{"", "a"}, - }, - { - input: "a/aa", - match: true, - subs: 
[]string{"a", "aa"}, - }, - { - input: "a/aa/a", - match: true, - subs: []string{"a", "aa/a"}, - }, - { - input: "foo.com", - match: true, - subs: []string{"", "foo.com"}, - }, - { - input: "foo.com/", - match: false, - }, - { - input: "foo.com:8080/bar", - match: true, - subs: []string{"foo.com:8080", "bar"}, - }, - { - input: "foo.com:http/bar", - match: false, - }, - { - input: "foo.com/bar", - match: true, - subs: []string{"foo.com", "bar"}, - }, - { - input: "foo.com/bar/baz", - match: true, - subs: []string{"foo.com", "bar/baz"}, - }, - { - input: "localhost:8080/bar", - match: true, - subs: []string{"localhost:8080", "bar"}, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - match: true, - subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, - }, - { - input: "blog.foo.com/bar/baz", - match: true, - subs: []string{"blog.foo.com", "bar/baz"}, - }, - { - input: "a^a", - match: false, - }, - { - input: "aa/asdf$$^/aa", - match: false, - }, - { - input: "asdf$$^/aa", - match: false, - }, - { - input: "aa-a/a", - match: true, - subs: []string{"aa-a", "a"}, - }, - { - input: strings.Repeat("a/", 128) + "a", - match: true, - subs: []string{"a", strings.Repeat("a/", 127) + "a"}, - }, - { - input: "a-/a/a/a", - match: false, - }, - { - input: "foo.com/a-/a/a", - match: false, - }, - { - input: "-foo/bar", - match: false, - }, - { - input: "foo/bar-", - match: false, - }, - { - input: "foo-/bar", - match: false, - }, - { - input: "foo/-bar", - match: false, - }, - { - input: "_foo/bar", - match: false, - }, - { - input: "foo_bar", - match: true, - subs: []string{"", "foo_bar"}, - }, - { - input: "foo_bar.com", - match: true, - subs: []string{"", "foo_bar.com"}, - }, - { - input: "foo_bar.com:8080", - match: false, - }, - { - input: "foo_bar.com:8080/app", - match: false, - }, - { - input: "foo.com/foo_bar", - match: true, - subs: []string{"foo.com", "foo_bar"}, - }, - { - input: "____/____", - match: false, - }, - { - input: "_docker/_docker", - match: false, - }, - { - input: "docker_/docker_", - match: false, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: []string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "xn--n3h.com/myimage", // ☃.com in punycode - match: true, - subs: []string{"xn--n3h.com", "myimage"}, - }, - { - input: "xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"xn--7o8h.com", "myimage"}, - }, - { - input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"example.com", "xn--7o8h.com/myimage"}, - }, - { - input: "example.com/some_separator__underscore/myimage", - match: true, - subs: []string{"example.com", "some_separator__underscore/myimage"}, - }, - { - input: "example.com/__underscore/myimage", - match: false, - }, - { - input: "example.com/..dots/myimage", - match: false, - }, - { - input: "example.com/.dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "docker./docker", - match: false, - }, - { - input: ".docker/docker", - match: false, - }, - { - input: "docker-/docker", - match: false, - }, - { - input: "-docker/docker", - match: false, - }, - { - input: "do..cker/docker", - match: false, - }, - { - input: "do__cker:8080/docker", - match: false, - }, - { - input: "do__cker/docker", - match: true, - subs: []string{"", "do__cker/docker"}, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: 
[]string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "registry.io/foo/project--id.module--name.ver---sion--name", - match: true, - subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, - }, - } - for i := range testcases { - checkRegexp(t, anchoredNameRegexp, testcases[i]) - } -} - -func TestReferenceRegexp(t *testing.T) { - if ReferenceRegexp.NumSubexp() != 3 { - t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", - ReferenceRegexp, ReferenceRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "registry.com:8080/myapp:tag", - match: true, - subs: []string{"registry.com:8080/myapp", "tag", ""}, - }, - { - input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@sha256:badbadbadbad", - match: false, - }, - { - input: "registry.com:8080/myapp:invalid~tag", - match: false, - }, - { - input: "bad_hostname.com:8080/myapp:tag", - match: false, - }, - { - input:// localhost treated as name, missing tag with 8080 as tag - "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: false, - }, - { - // localhost will be treated as an image name without a host - input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@bad", - match: false, - }, - { - input: "registry.com:8080/myapp@2bad", - match: false, // TODO(dmcgowan): Support this as valid - }, - } - - for i := range testcases { - checkRegexp(t, ReferenceRegexp, testcases[i]) - } - -} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index dcb35c37..1ede31eb 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -40,6 +40,17 @@ type Namespace interface { // which were filled. 'last' contains an offset in the catalog, and 'err' will be // set to io.EOF if there are no more entries to obtain. 
 	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+
+	// Blobs returns a blob enumerator to access all blobs
+	Blobs() BlobEnumerator
+
+	// BlobStatter returns a BlobStatter to check the existence of blobs.
+	BlobStatter() BlobStatter
+}
+
+// RepositoryEnumerator describes an operation to enumerate repositories
+type RepositoryEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(string) error) error
 }
 
 // ManifestServiceOption is a function argument for Manifest Service methods
@@ -47,10 +58,24 @@ type ManifestServiceOption interface {
 	Apply(ManifestService) error
 }
 
+// WithTag allows a tag to be passed into Put
+func WithTag(tag string) ManifestServiceOption {
+	return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithTagOption) Apply(m ManifestService) error {
+	// no implementation
+	return nil
+}
+
 // Repository is a named collection of manifests and layers.
 type Repository interface {
-	// Name returns the name of the repository.
-	Name() reference.Named
+	// Named returns the name of the repository.
+	Named() reference.Named
 
 	// Manifests returns a reference to this repository's manifest service.
 	// with the supplied options applied.
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go
deleted file mode 100644
index 54e7a736..00000000
--- a/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package errcode
-
-import (
-	"encoding/json"
-	"net/http"
-	"reflect"
-	"strings"
-	"testing"
-)
-
-// TestErrorsManagement does a quick check of the Errors type to ensure that
-// members are properly pushed and marshaled.
-var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{
-	Value:          "TEST1",
-	Message:        "test error 1",
-	Description:    `Just a test message #1.`,
-	HTTPStatusCode: http.StatusInternalServerError,
-})
-
-var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{
-	Value:          "TEST2",
-	Message:        "test error 2",
-	Description:    `Just a test message #2.`,
-	HTTPStatusCode: http.StatusNotFound,
-})
-
-var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{
-	Value:          "TEST3",
-	Message:        "Sorry %q isn't valid",
-	Description:    `Just a test message #3.`,
-	HTTPStatusCode: http.StatusNotFound,
-})
-
-// TestErrorCodes ensures that error code format, mappings and
-// marshaling/unmarshaling. round trips are stable.
-func TestErrorCodes(t *testing.T) {
-	if len(errorCodeToDescriptors) == 0 {
-		t.Fatal("errors aren't loaded!")
-	}
-
-	for ec, desc := range errorCodeToDescriptors {
-		if ec != desc.Code {
-			t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code)
-		}
-
-		if idToDescriptors[desc.Value].Code != ec {
-			t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec)
-		}
-
-		if ec.Message() != desc.Message {
-			t.Fatalf("ec.Message doesn't mtach desc.Message: %q != %q", ec.Message(), desc.Message)
-		}
-
-		// Test (de)serializing the ErrorCode
-		p, err := json.Marshal(ec)
-		if err != nil {
-			t.Fatalf("couldn't marshal ec %v: %v", ec, err)
-		}
-
-		if len(p) <= 0 {
-			t.Fatalf("expected content in marshaled before for error code %v", ec)
-		}
-
-		// First, unmarshal to interface and ensure we have a string.
- var ecUnspecified interface{} - if err := json.Unmarshal(p, &ecUnspecified); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", ec, err) - } - - if _, ok := ecUnspecified.(string); !ok { - t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified) - } - - // Now, unmarshal with the error code type and ensure they are equal - var ecUnmarshaled ErrorCode - if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", ec, err) - } - - if ecUnmarshaled != ec { - t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) - } - - expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1)) - if ec.Error() != expectedErrorString { - t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) - } - } - -} - -func TestErrorsManagement(t *testing.T) { - var errs Errors - - errs = append(errs, ErrorCodeTest1) - errs = append(errs, ErrorCodeTest2.WithDetail( - map[string]interface{}{"digest": "sometestblobsumdoesntmatter"})) - errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) - errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) - - p, err := json.Marshal(errs) - - if err != nil { - t.Fatalf("error marashaling errors: %v", err) - } - - expectedJSON := `{"errors":[` + - `{"code":"TEST1","message":"test error 1"},` + - `{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` + - `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` + - `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` + - `]}` - - if string(p) != expectedJSON { - t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) - } - - // Now test the reverse - var unmarshaled Errors - if err := json.Unmarshal(p, &unmarshaled); err != nil { - t.Fatalf("unexpected error unmarshaling error envelope: %v", err) - } - - if !reflect.DeepEqual(unmarshaled, errs) { - t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) - } - - // Test the arg substitution stuff - e1 := unmarshaled[3].(Error) - exp1 := `Sorry "BOOGIE" isn't valid` - if e1.Message != exp1 { - t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) - } - - exp1 = "test3: " + exp1 - if e1.Error() != exp1 { - t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1) - } - - // Test again with a single value this time - errs = Errors{ErrorCodeUnknown} - expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" - p, err = json.Marshal(errs) - - if err != nil { - t.Fatalf("error marashaling errors: %v", err) - } - - if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) - } - - // Now test the reverse - unmarshaled = nil - if err := json.Unmarshal(p, &unmarshaled); err != nil { - t.Fatalf("unexpected error unmarshaling error envelope: %v", err) - } - - if !reflect.DeepEqual(unmarshaled, errs) { - t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) - } - - // Verify that calling WithArgs() more than once does the right thing. 
- // Meaning creates a new Error and uses the ErrorCode Message - e1 = ErrorCodeTest3.WithArgs("test1") - e2 := e1.WithArgs("test2") - if &e1 == &e2 { - t.Fatalf("args: e2 and e1 should not be the same, but they are") - } - if e2.Message != `Sorry "test2" isn't valid` { - t.Fatalf("e2 had wrong message: %q", e2.Message) - } - - // Verify that calling WithDetail() more than once does the right thing. - // Meaning creates a new Error and overwrites the old detail field - e1 = ErrorCodeTest3.WithDetail("stuff1") - e2 = e1.WithDetail("stuff2") - if &e1 == &e2 { - t.Fatalf("detail: e2 and e1 should not be the same, but they are") - } - if e2.Detail != `stuff2` { - t.Fatalf("e2 had wrong detail: %q", e2.Detail) - } - -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go index ad3da3ef..58279994 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -271,7 +271,7 @@ type MethodDescriptor struct { // RequestDescriptor per API use case. type RequestDescriptor struct { // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particalar request. + // to provide quick context for the particular request. Name string // Description should cover the requests purpose, covering any details for @@ -303,14 +303,14 @@ type RequestDescriptor struct { // ResponseDescriptor describes the components of an API response. type ResponseDescriptor struct { // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particalar response. + // to provide quick context for the particular response. Name string // Description should provide a brief overview of the role of the // response. Description string - // StatusCode specifies the status recieved by this particular response. + // StatusCode specifies the status received by this particular response. StatusCode int // Headers covers any headers that may be returned from the response. @@ -514,7 +514,7 @@ var routeDescriptors = []RouteDescriptor{ digestHeader, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, }, @@ -553,7 +553,7 @@ var routeDescriptors = []RouteDescriptor{ referenceParameterDescriptor, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, Successes: []ResponseDescriptor{ diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go index ece52a2c..97d6923a 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go @@ -84,7 +84,7 @@ var ( }) // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verfication. + // signature verification. 
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go deleted file mode 100644 index f632d981..00000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package v2 - -import ( - "encoding/json" - "fmt" - "math/rand" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - "time" - - "github.com/gorilla/mux" -) - -type routeTestCase struct { - RequestURI string - ExpectedURI string - Vars map[string]string - RouteName string - StatusCode int -} - -// TestRouter registers a test handler with all the routes and ensures that -// each route returns the expected path variables. Not method verification is -// present. This not meant to be exhaustive but as check to ensure that the -// expected variables are extracted. -// -// This may go away as the application structure comes together. -func TestRouter(t *testing.T) { - testCases := []routeTestCase{ - { - RouteName: RouteNameBase, - RequestURI: "/v2/", - Vars: map[string]string{}, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/manifests/bar", - Vars: map[string]string{ - "name": "foo", - "reference": "bar", - }, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/bar/manifests/tag", - Vars: map[string]string{ - "name": "foo/bar", - "reference": "tag", - }, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", - Vars: map[string]string{ - "name": "foo/bar", - "reference": "sha256:abcdef01234567890", - }, - }, - { - RouteName: RouteNameTags, - RequestURI: "/v2/foo/bar/tags/list", - Vars: map[string]string{ - "name": "foo/bar", - }, - }, - { - RouteName: RouteNameTags, - RequestURI: "/v2/docker.com/foo/tags/list", - Vars: map[string]string{ - "name": "docker.com/foo", - }, - }, - { - RouteName: RouteNameTags, - RequestURI: "/v2/docker.com/foo/bar/tags/list", - Vars: map[string]string{ - "name": "docker.com/foo/bar", - }, - }, - { - RouteName: RouteNameTags, - RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", - Vars: map[string]string{ - "name": "docker.com/foo/bar/baz", - }, - }, - { - RouteName: RouteNameBlob, - RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", - Vars: map[string]string{ - "name": "foo/bar", - "digest": "sha256:abcdef0919234", - }, - }, - { - RouteName: RouteNameBlobUpload, - RequestURI: "/v2/foo/bar/blobs/uploads/", - Vars: map[string]string{ - "name": "foo/bar", - }, - }, - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/uuid", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "uuid", - }, - }, - { - // support uuid proper - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - }, - }, - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", - }, - }, - { - // supports urlsafe base64 - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", - 
Vars: map[string]string{ - "name": "foo/bar", - "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", - }, - }, - { - // does not match - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", - StatusCode: http.StatusNotFound, - }, - { - // Check ambiguity: ensure we can distinguish between tags for - // "foo/bar/image/image" and image for "foo/bar/image" with tag - // "tags" - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/bar/manifests/manifests/tags", - Vars: map[string]string{ - "name": "foo/bar/manifests", - "reference": "tags", - }, - }, - { - // This case presents an ambiguity between foo/bar with tag="tags" - // and list tags for "foo/bar/manifest" - RouteName: RouteNameTags, - RequestURI: "/v2/foo/bar/manifests/tags/list", - Vars: map[string]string{ - "name": "foo/bar/manifests", - }, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", - Vars: map[string]string{ - "name": "locahost:8080/foo/bar/baz", - "reference": "tag", - }, - }, - } - - checkTestRouter(t, testCases, "", true) - checkTestRouter(t, testCases, "/prefix/", true) -} - -func TestRouterWithPathTraversals(t *testing.T) { - testCases := []routeTestCase{ - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, - }, - { - // Testing for path traversal attack handling - RouteName: RouteNameTags, - RequestURI: "/v2/foo/../bar/baz/tags/list", - ExpectedURI: "/v2/bar/baz/tags/list", - Vars: map[string]string{ - "name": "bar/baz", - }, - }, - } - checkTestRouter(t, testCases, "", false) -} - -func TestRouterWithBadCharacters(t *testing.T) { - if testing.Short() { - testCases := []routeTestCase{ - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, - }, - { - // Testing for path traversal attack handling - RouteName: RouteNameTags, - RequestURI: "/v2/foo/不bar/tags/list", - StatusCode: http.StatusNotFound, - }, - } - checkTestRouter(t, testCases, "", true) - } else { - // in the long version we're going to fuzz the router - // with random UTF8 characters not in the 128 bit ASCII range. - // These are not valid characters for the router and we expect - // 404s on every test. 
- rand.Seed(time.Now().UTC().UnixNano()) - testCases := make([]routeTestCase, 1000) - for idx := range testCases { - testCases[idx] = routeTestCase{ - RouteName: RouteNameTags, - RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), - StatusCode: http.StatusNotFound, - } - } - checkTestRouter(t, testCases, "", true) - } -} - -func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { - router := RouterWithPrefix(prefix) - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - testCase := routeTestCase{ - RequestURI: r.RequestURI, - Vars: mux.Vars(r), - RouteName: mux.CurrentRoute(r).GetName(), - } - - enc := json.NewEncoder(w) - - if err := enc.Encode(testCase); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - }) - - // Startup test server - server := httptest.NewServer(router) - - for _, testcase := range testCases { - testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI - // Register the endpoint - route := router.GetRoute(testcase.RouteName) - if route == nil { - t.Fatalf("route for name %q not found", testcase.RouteName) - } - - route.Handler(testHandler) - - u := server.URL + testcase.RequestURI - - resp, err := http.Get(u) - - if err != nil { - t.Fatalf("error issuing get request: %v", err) - } - - if testcase.StatusCode == 0 { - // Override default, zero-value - testcase.StatusCode = http.StatusOK - } - if testcase.ExpectedURI == "" { - // Override default, zero-value - testcase.ExpectedURI = testcase.RequestURI - } - - if resp.StatusCode != testcase.StatusCode { - t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) - } - - if testcase.StatusCode != http.StatusOK { - resp.Body.Close() - // We don't care about json response. - continue - } - - dec := json.NewDecoder(resp.Body) - - var actualRouteInfo routeTestCase - if err := dec.Decode(&actualRouteInfo); err != nil { - t.Fatalf("error reading json response: %v", err) - } - // Needs to be set out of band - actualRouteInfo.StatusCode = resp.StatusCode - - if actualRouteInfo.RequestURI != testcase.ExpectedURI { - t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) - } - - if actualRouteInfo.RouteName != testcase.RouteName { - t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) - } - - // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want - // that to make the comparison fail. We're otherwise done with the testcase so empty the - // testcase.ExpectedURI - testcase.ExpectedURI = "" - if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { - t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) - } - - resp.Body.Close() - } - -} - -// -------------- START LICENSED CODE -------------- -// The following code is derivative of https://github.com/google/gofuzz -// gofuzz is licensed under the Apache License, Version 2.0, January 2004, -// a copy of which can be found in the LICENSE file at the root of this -// repository. - -// These functions allow us to generate strings containing only multibyte -// characters that are invalid in our URLs. They are used above for fuzzing -// to ensure we always get 404s on these invalid strings -type charRange struct { - first, last rune -} - -// choose returns a random unicode character from the given range, using the -// given randomness source. 
-func (r *charRange) choose() rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) -} - -var unicodeRanges = []charRange{ - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -func randomString(length int) string { - runes := make([]rune, length) - for i := range runes { - runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose() - } - return string(runes) -} - -// -------------- END LICENSED CODE -------------- diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 408c7b74..a959aaa8 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -17,33 +17,35 @@ import ( // under "/foo/v2/...". Most applications will only provide a scheme, host and // port, such as "https://localhost:5000/". type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router + root *url.URL // url root (ie http://localhost/) + router *mux.Router + relative bool } // NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL) *URLBuilder { +func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { return &URLBuilder{ - root: root, - router: Router(), + root: root, + router: Router(), + relative: relative, } } // NewURLBuilderFromString works identically to NewURLBuilder except it takes // a string argument for the root, returning an error if it is not a valid // url. -func NewURLBuilderFromString(root string) (*URLBuilder, error) { +func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } - return NewURLBuilder(u), nil + return NewURLBuilder(u, relative), nil } // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. -func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { +func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { var scheme string forwardedProto := r.Header.Get("X-Forwarded-Proto") @@ -85,7 +87,7 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { u.Path = requestPath[0 : index+1] } - return NewURLBuilder(u) + return NewURLBuilder(u, relative) } // BuildBaseURL constructs a base url for the API, typically just "/v2/".
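The hunks above thread a new relative flag through every URLBuilder constructor. A minimal sketch of what this buys a caller, mirroring the cases in the urls_test.go deleted below; the registry root and repository name are illustrative, and the expected output assumes the path-only routes this package registers:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	// relative=true makes the builder return bare paths, leaving scheme and
	// host to whatever proxy or client assembles the final URL.
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com", true)
	if err != nil {
		panic(err)
	}

	ref, err := reference.ParseNamed("foo/bar") // sample repository name
	if err != nil {
		panic(err)
	}

	tagsURL, err := ub.BuildTagsURL(ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(tagsURL) // with relative=true: "/v2/foo/bar/tags/list"
}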
@@ -194,12 +196,13 @@ func (ub *URLBuilder) cloneRoute(name string) clonedRoute { *route = *ub.router.GetRoute(name) // clone the route *root = *ub.root - return clonedRoute{Route: route, root: root} + return clonedRoute{Route: route, root: root, relative: ub.relative} } type clonedRoute struct { *mux.Route - root *url.URL + root *url.URL + relative bool } func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { @@ -208,6 +211,10 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { return nil, err } + if cr.relative { + return routeURL, nil + } + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { routeURL.Path = routeURL.Path[1:] } diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go deleted file mode 100644 index 1af1f261..00000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go +++ /dev/null @@ -1,304 +0,0 @@ -package v2 - -import ( - "net/http" - "net/url" - "testing" - - "github.com/docker/distribution/reference" -) - -type urlBuilderTestCase struct { - description string - expectedPath string - build func() (string, error) -} - -func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { - fooBarRef, _ := reference.ParseNamed("foo/bar") - return []urlBuilderTestCase{ - { - description: "test base url", - expectedPath: "/v2/", - build: urlBuilder.BuildBaseURL, - }, - { - description: "test tags url", - expectedPath: "/v2/foo/bar/tags/list", - build: func() (string, error) { - return urlBuilder.BuildTagsURL(fooBarRef) - }, - }, - { - description: "test manifest url", - expectedPath: "/v2/foo/bar/manifests/tag", - build: func() (string, error) { - ref, _ := reference.WithTag(fooBarRef, "tag") - return urlBuilder.BuildManifestURL(ref) - }, - }, - { - description: "build blob url", - expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", - build: func() (string, error) { - ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") - return urlBuilder.BuildBlobURL(ref) - }, - }, - { - description: "build blob upload url", - expectedPath: "/v2/foo/bar/blobs/uploads/", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL(fooBarRef) - }, - }, - { - description: "build blob upload url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ - "size": []string{"10000"}, - "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, - }) - }, - }, - { - description: "build blob upload chunk url", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") - }, - }, - { - description: "build blob upload chunk url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ - "size": []string{"10000"}, - "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, - }) - }, - }, - } -} - -// TestURLBuilder tests the 
various url building functions, ensuring they are -// returning the expected values. -func TestURLBuilder(t *testing.T) { - roots := []string{ - "http://example.com", - "https://example.com", - "http://localhost:5000", - "https://localhost:5443", - } - - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := root + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } -} - -func TestURLBuilderWithPrefix(t *testing.T) { - roots := []string{ - "http://example.com/prefix/", - "https://example.com/prefix/", - "http://localhost:5000/prefix/", - "https://localhost:5443/prefix/", - } - - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := root[0:len(root)-1] + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } -} - -type builderFromRequestTestCase struct { - request *http.Request - base string -} - -func TestBuilderFromRequest(t *testing.T) { - u, err := url.Parse("http://example.com") - if err != nil { - t.Fatal(err) - } - - forwardedProtoHeader := make(http.Header, 1) - forwardedProtoHeader.Set("X-Forwarded-Proto", "https") - - forwardedHostHeader1 := make(http.Header, 1) - forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com") - - forwardedHostHeader2 := make(http.Header, 1) - forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") - - testRequests := []struct { - request *http.Request - base string - configHost url.URL - }{ - { - request: &http.Request{URL: u, Host: u.Host}, - base: "http://example.com", - }, - - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "http://example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1}, - base: "http://first.example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, - base: "http://first.example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, - base: "https://third.example.com:5000", - configHost: url.URL{ - Scheme: "https", - Host: "third.example.com:5000", - }, - }, - } - - for _, tr := range testRequests { - var builder *URLBuilder - if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = NewURLBuilder(&tr.configHost) - } else { - builder = NewURLBuilderFromRequest(tr.request) - } - - for _, testCase := range makeURLBuilderTestCases(builder) { - buildURL, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - var expectedURL string - proto, ok := tr.request.Header["X-Forwarded-Proto"] - if !ok { - expectedURL = tr.base + testCase.expectedPath - } else { - urlBase, err := 
url.Parse(tr.base) - if err != nil { - t.Fatal(err) - } - urlBase.Scheme = proto[0] - expectedURL = urlBase.String() + testCase.expectedPath - } - - if buildURL != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) - } - } - } -} - -func TestBuilderFromRequestWithPrefix(t *testing.T) { - u, err := url.Parse("http://example.com/prefix/v2/") - if err != nil { - t.Fatal(err) - } - - forwardedProtoHeader := make(http.Header, 1) - forwardedProtoHeader.Set("X-Forwarded-Proto", "https") - - testRequests := []struct { - request *http.Request - base string - configHost url.URL - }{ - { - request: &http.Request{URL: u, Host: u.Host}, - base: "http://example.com/prefix/", - }, - - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "http://example.com/prefix/", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://example.com/prefix/", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://subdomain.example.com/prefix/", - configHost: url.URL{ - Scheme: "https", - Host: "subdomain.example.com", - Path: "/prefix/", - }, - }, - } - - for _, tr := range testRequests { - var builder *URLBuilder - if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = NewURLBuilder(&tr.configHost) - } else { - builder = NewURLBuilderFromRequest(tr.request) - } - - for _, testCase := range makeURLBuilderTestCases(builder) { - buildURL, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - var expectedURL string - proto, ok := tr.request.Header["X-Forwarded-Proto"] - if !ok { - expectedURL = tr.base[0:len(tr.base)-1] + testCase.expectedPath - } else { - urlBase, err := url.Parse(tr.base) - if err != nil { - t.Fatal(err) - } - urlBase.Scheme = proto[0] - expectedURL = urlBase.String()[0:len(urlBase.String())-1] + testCase.expectedPath - } - - if buildURL != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) - } - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go index a6ad45d8..c8cd83bb 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go @@ -25,7 +25,7 @@ type Challenge struct { type ChallengeManager interface { // GetChallenges returns the challenges for the given // endpoint URL. - GetChallenges(endpoint string) ([]Challenge, error) + GetChallenges(endpoint url.URL) ([]Challenge, error) // AddResponse adds the response to the challenge // manager. 
The challenges will be parsed out of @@ -48,8 +48,10 @@ func NewSimpleChallengeManager() ChallengeManager { type simpleChallengeManager map[string][]Challenge -func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) { - challenges := m[endpoint] +func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + endpoint.Host = strings.ToLower(endpoint.Host) + + challenges := m[endpoint.String()] return challenges, nil } @@ -60,11 +62,10 @@ func (m simpleChallengeManager) AddResponse(resp *http.Response) error { } urlCopy := url.URL{ Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, + Host: strings.ToLower(resp.Request.URL.Host), Scheme: resp.Request.URL.Scheme, } m[urlCopy.String()] = challenges - return nil } diff --git a/vendor/github.com/docker/distribution/registry/client/auth/authchallenge_test.go b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge_test.go deleted file mode 100644 index 9b6a5adc..00000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/authchallenge_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package auth - -import ( - "net/http" - "testing" -) - -func TestAuthChallengeParse(t *testing.T) { - header := http.Header{} - header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) - - challenges := parseAuthHeader(header) - if len(challenges) != 1 { - t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) - } - challenge := challenges[0] - - if expected := "bearer"; challenge.Scheme != expected { - t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) - } - - if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) - } - - if expected := "registry.example.com"; challenge.Parameters["service"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) - } - - if expected := "fun"; challenge.Parameters["other"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) - } - - if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) - } - -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go index 6b483c62..f3497b17 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -15,6 +15,18 @@ import ( "github.com/docker/distribution/registry/client/transport" ) +var ( + // ErrNoBasicAuthCredentials is returned if a request can't be authorized with + // basic auth due to lack of credentials. + ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") + + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +const defaultClientID = "registry-client" + // AuthenticationHandler is an interface for authorizing a request using // params from a "WWW-Authenticate" header for a single scheme.
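// For the simpleChallengeManager change above, a short usage sketch (the
// endpoint is hypothetical and not part of this diff). Challenges are now
// keyed by url.URL instead of string, and GetChallenges lower-cases the host
// before lookup, so differently-cased endpoints resolve to the same cached
// challenges:
//
//	manager := NewSimpleChallengeManager()
//	resp, err := http.Get("https://registry.example.com/v2/")
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//	if err := manager.AddResponse(resp); err != nil { // records any WWW-Authenticate challenges
//		return err
//	}
//	challenges, err := manager.GetChallenges(url.URL{
//		Scheme: "https",
//		Host:   "Registry.Example.COM", // normalized to "registry.example.com" internally
//		Path:   "/v2/",
//	})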
type AuthenticationHandler interface { @@ -32,6 +44,14 @@ type AuthenticationHandler interface { type CredentialStore interface { // Basic returns basic auth for the given URL Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) } // NewAuthorizer creates an authorizer which can handle multiple authentication @@ -63,9 +83,7 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { Path: req.URL.Path[:v2Root+4], } - pingEndpoint := ping.String() - - challenges, err := ea.challenges.GetChallenges(pingEndpoint) + challenges, err := ea.challenges.GetChallenges(ping) if err != nil { return err } @@ -101,27 +119,47 @@ type clock interface { type tokenHandler struct { header http.Header creds CredentialStore - scope tokenScope transport http.RoundTripper clock clock + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + tokenLock sync.Mutex tokenCache string tokenExpiration time.Time - - additionalScopes map[string]struct{} } -// tokenScope represents the scope at which a token will be requested. -// This represents a specific action on a registry resource. -type tokenScope struct { - Resource string - Scope string - Actions []string +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string } -func (ts tokenScope) String() string { - return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +// RepositoryScope represents a token scope for access +// to a repository. +type RepositoryScope struct { + Repository string + Actions []string +} + +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) +} + +// TokenHandlerOptions is used to configure a new token handler +type TokenHandlerOptions struct { + Transport http.RoundTripper + Credentials CredentialStore + + OfflineAccess bool + ForceOAuth bool + ClientID string + Scopes []Scope } // An implementation of clock for providing real time data. @@ -133,22 +171,33 @@ func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - return newTokenHandler(transport, creds, realClock{}, scope, actions...) + // Create options... + return NewTokenHandlerWithOptions(TokenHandlerOptions{ + Transport: transport, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: scope, + Actions: actions, + }, + }, + }) } -// newTokenHandler exposes the option to provide a clock to manipulate time in unit testing. -func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { - return &tokenHandler{ - transport: transport, - creds: creds, - clock: c, - scope: tokenScope{ - Resource: "repository", - Scope: scope, - Actions: actions, - }, - additionalScopes: map[string]struct{}{}, +// NewTokenHandlerWithOptions creates a new token handler using the provided +// options structure. 
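// An illustrative call (the transport, credential store, and repository name
// below are assumptions for the example, not part of this change):
//
//	handler := NewTokenHandlerWithOptions(TokenHandlerOptions{
//		Transport:     http.DefaultTransport,
//		Credentials:   creds, // a CredentialStore; its RefreshToken/SetRefreshToken hooks enable OAuth
//		OfflineAccess: true,  // ask the token server for a refresh token as well
//		Scopes: []Scope{
//			RepositoryScope{Repository: "foo/bar", Actions: []string{"pull", "push"}},
//		},
//	})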
+func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { + handler := &tokenHandler{ + transport: options.Transport, + creds: options.Credentials, + offlineAccess: options.OfflineAccess, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, } + + return handler } func (th *tokenHandler) client() *http.Client { @@ -165,88 +214,162 @@ func (th *tokenHandler) Scheme() string { func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { var additionalScopes []string if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, tokenScope{ - Resource: "repository", - Scope: fromParam, - Actions: []string{"pull"}, + additionalScopes = append(additionalScopes, RepositoryScope{ + Repository: fromParam, + Actions: []string{"pull"}, }.String()) } - if err := th.refreshToken(params, additionalScopes...); err != nil { + + token, err := th.getToken(params, additionalScopes...) + if err != nil { return err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return nil } -func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { th.tokenLock.Lock() defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } var addedScopes bool for _, scope := range additionalScopes { - if _, ok := th.additionalScopes[scope]; !ok { - th.additionalScopes[scope] = struct{}{} - addedScopes = true - } + scopes = append(scopes, scope) + addedScopes = true } + now := th.clock.Now() if now.After(th.tokenExpiration) || addedScopes { - tr, err := th.fetchToken(params) + token, expiration, err := th.fetchToken(params, scopes) if err != nil { - return err + return "", err } - th.tokenCache = tr.Token - th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil } - return nil + return th.tokenCache, nil } -type tokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` } -func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { - //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) - realm, ok := params["realm"] - if !ok { - return nil, errors.New("no realm specified for token auth challenge") +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) + + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != 
"" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") } - // TODO(dmcgowan): Handle empty scheme - - realmURL, err := url.Parse(realm) + resp, err := th.client().PostForm(realm.String(), form) if err != nil { - return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err } - req, err := http.NewRequest("GET", realmURL.String(), nil) + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { + + req, err := http.NewRequest("GET", realm.String(), nil) if err != nil { - return nil, err + return "", time.Time{}, err } reqParams := req.URL.Query() - service := params["service"] - scope := th.scope.String() if service != "" { reqParams.Add("service", service) } - for _, scopeField := range strings.Fields(scope) { - reqParams.Add("scope", scopeField) - } - - for scope := range th.additionalScopes { + for _, scope := range scopes { reqParams.Add("scope", scope) } + if th.offlineAccess { + reqParams.Add("offline_token", "true") + clientID := th.clientID + if clientID == "" { + clientID = defaultClientID + } + reqParams.Add("client_id", clientID) + } + if th.creds != nil { - username, password := th.creds.Basic(realmURL) + username, password := th.creds.Basic(realm) if username != "" && password != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) @@ -257,20 +380,24 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon resp, err := th.client().Do(req) if err != nil { - return nil, err + return "", time.Time{}, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { err := client.HandleErrorResponse(resp) - return nil, err + return "", time.Time{}, err } decoder := json.NewDecoder(resp.Body) - tr := new(tokenResponse) - if err = decoder.Decode(tr); err != nil { - return nil, fmt.Errorf("unable to decode token response: %s", err) + var tr getTokenResponse + if err = 
decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && th.creds != nil { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } // `access_token` is equivalent to `token` and if both are specified @@ -281,21 +408,48 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon } if tr.Token == "" { - return nil, errors.New("authorization server did not include a token in the response") + return "", time.Time{}, ErrNoToken } if tr.ExpiresIn < minimumTokenLifetimeSeconds { - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now() + tr.IssuedAt = th.clock.Now().UTC() } - return tr, nil + return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { + realm, ok := params["realm"] + if !ok { + return "", time.Time{}, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme and relative realm + realmURL, err := url.Parse(realm) + if err != nil { + return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + service := params["service"] + + var refreshToken string + + if th.creds != nil { + refreshToken = th.creds.RefreshToken(realmURL, service) + } + + if refreshToken != "" || th.forceOAuth { + return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) + } + + return th.fetchTokenWithBasicAuth(realmURL, service, scopes) } type basicHandler struct { @@ -322,5 +476,5 @@ func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]st return nil } } - return errors.New("no basic auth credentials") + return ErrNoBasicAuthCredentials } diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go deleted file mode 100644 index f1686942..00000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go +++ /dev/null @@ -1,599 +0,0 @@ -package auth - -import ( - "encoding/base64" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" - "time" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/testutil" -) - -// An implementation of clock for providing fake time data. 
-type fakeClock struct { - current time.Time -} - -// Now implements clock -func (fc *fakeClock) Now() time.Time { return fc.current } - -func testServer(rrm testutil.RequestResponseMap) (string, func()) { - h := testutil.NewHandler(rrm) - s := httptest.NewServer(h) - return s.URL, s.Close -} - -type testAuthenticationWrapper struct { - headers http.Header - authCheck func(string) bool - next http.Handler -} - -func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - auth := r.Header.Get("Authorization") - if auth == "" || !w.authCheck(auth) { - h := rw.Header() - for k, values := range w.headers { - h[k] = values - } - rw.WriteHeader(http.StatusUnauthorized) - return - } - w.next.ServeHTTP(rw, r) -} - -func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { - h := testutil.NewHandler(rrm) - wrapper := &testAuthenticationWrapper{ - - headers: http.Header(map[string][]string{ - "X-API-Version": {"registry/2.0"}, - "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, - "WWW-Authenticate": {authenticate}, - }), - authCheck: authCheck, - next: h, - } - - s := httptest.NewServer(wrapper) - return s.URL, s.Close -} - -// ping pings the provided endpoint to determine its required authorization challenges. -// If a version header is provided, the versions will be returned. -func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) { - resp, err := http.Get(endpoint) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := manager.AddResponse(resp); err != nil { - return nil, err - } - - return APIVersions(resp, versionHeader), err -} - -type testCredentialStore struct { - username string - password string -} - -func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { - return tcs.username, tcs.password -} - -func TestEndpointAuthorizeToken(t *testing.T) { - service := "localhost.localdomain" - repo1 := "some/registry" - repo2 := "other/registry" - scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) - scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken"}`), - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"badtoken"}`), - }, - }, - }) - te, tc := testServer(tokenMap) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - validCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate, validCheck) - defer c() - - challengeManager1 := NewSimpleChallengeManager() - versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") - if err != nil { - t.Fatal(err) - } - if len(versions) != 1 { - t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) - } - if check := 
(APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) - client := &http.Client{Transport: transport1} - - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - - badCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e2, c2 := testServerWithAuth(m, authenicate, badCheck) - defer c2() - - challengeManager2 := NewSimpleChallengeManager() - versions, err = ping(challengeManager2, e+"/v2/", "x-multi-api-version") - if err != nil { - t.Fatal(err) - } - if len(versions) != 3 { - t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) - } - if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) - } - if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) - } - if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) - } - transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) - client2 := &http.Client{Transport: transport2} - - req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) - resp, err = client2.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) - } -} - -func basicAuth(username, password string) string { - auth := username + ":" + password - return base64.StdEncoding.EncodeToString([]byte(auth)) -} - -func TestEndpointAuthorizeTokenBasic(t *testing.T) { - service := "localhost.localdomain" - repo := "some/fun/registry" - scope := fmt.Sprintf("repository:%s:pull,push", repo) - username := "tokenuser" - password := "superSecretPa$$word" - - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"access_token":"statictoken"}`), - }, - }, - }) - - authenicate1 := fmt.Sprintf("Basic realm=localhost") - basicCheck := func(a string) bool { - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - bearerCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate2, bearerCheck) - defer c() - - creds := &testCredentialStore{ - username: username, - 
password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } -} - -func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { - service := "localhost.localdomain" - repo := "some/fun/registry" - scope := fmt.Sprintf("repository:%s:pull,push", repo) - username := "tokenuser" - password := "superSecretPa$$word" - - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), - }, - }, - }) - - authenicate1 := fmt.Sprintf("Basic realm=localhost") - tokenExchanges := 0 - basicCheck := func(a string) bool { - tokenExchanges = tokenExchanges + 1 - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - bearerCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate2, bearerCheck) - defer c() - - creds := &testCredentialStore{ - username: username, - password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - clock := &fakeClock{current: time.Now()} - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - // First call should result in a token exchange - // Subsequent calls should recycle the token 
from the first request, until the expiration has lapsed. - timeIncrement := 1000 * time.Second - for i := 0; i < 4; i++ { - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 1 { - t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) - } - clock.current = clock.current.Add(timeIncrement) - } - - // After we've exceeded the expiration, we should see a second token exchange. - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 2 { - t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) - } -} - -func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { - service := "localhost.localdomain" - repo := "some/fun/registry" - scope := fmt.Sprintf("repository:%s:pull,push", repo) - username := "tokenuser" - password := "superSecretPa$$word" - - // This test sets things up such that the token was issued one increment - // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. - // This will mean that the token expires after 3 increments instead of 4. - clock := &fakeClock{current: time.Now()} - timeIncrement := 1000 * time.Second - firstIssuedAt := clock.Now() - clock.current = clock.current.Add(timeIncrement) - secondIssuedAt := clock.current.Add(2 * timeIncrement) - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), - }, - }, - }) - - authenicate1 := fmt.Sprintf("Basic realm=localhost") - tokenExchanges := 0 - basicCheck := func(a string) bool { - tokenExchanges = tokenExchanges + 1 - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - 
Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - bearerCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate2, bearerCheck) - defer c() - - creds := &testCredentialStore{ - username: username, - password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - // First call should result in a token exchange - // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. - // We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn - // so this loop should have one fewer iteration. - for i := 0; i < 3; i++ { - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 1 { - t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) - } - clock.current = clock.current.Add(timeIncrement) - } - - // After we've exceeded the expiration, we should see a second token exchange. - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 2 { - t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) - } -} - -func TestEndpointAuthorizeBasic(t *testing.T) { - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - username := "user1" - password := "funSecretPa$$word" - authenicate := fmt.Sprintf("Basic realm=localhost") - validCheck := func(a string) bool { - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - e, c := testServerWithAuth(m, authenicate, validCheck) - defer c() - creds := &testCredentialStore{ - username: username, - password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } -} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go index 21a018dc..e3ffcb00 100644 --- 
a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -6,7 +6,6 @@ import ( "io" "io/ioutil" "net/http" - "os" "time" "github.com/docker/distribution" @@ -104,21 +103,8 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { } -func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { - newOffset := hbu.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset += int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - hbu.offset = newOffset - - return hbu.offset, nil +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset } func (hbu *httpBlobUpload) ID() string { diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go b/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go deleted file mode 100644 index 099dca4f..00000000 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "net/http" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/testutil" -) - -// Test implements distribution.BlobWriter -var _ distribution.BlobWriter = &httpBlobUpload{} - -func TestUploadReadFrom(t *testing.T) { - _, b := newRandomBlob(64) - repo := "test/upload/readfrom" - locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Docker-Distribution-API-Version": {"registry/2.0"}, - }), - }, - }, - // Test Valid case - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, - "Location": {locationPath}, - "Range": {"0-63"}, - }), - }, - }, - // Test invalid range - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, - "Location": {locationPath}, - "Range": {""}, - }), - }, - }, - // Test 404 - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusNotFound, - }, - }, - // Test 400 valid json - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusBadRequest, - Body: []byte(` - { "errors": - [ - { - "code": "BLOB_UPLOAD_INVALID", - "message": "blob upload invalid", - "detail": "more detail" - } - ] - } `), - }, - }, - // Test 400 invalid json - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusBadRequest, - Body: []byte("something bad happened"), - }, - }, - // Test 500 - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: 
http.StatusInternalServerError, - }, - }, - }) - - e, c := testServer(m) - defer c() - - blobUpload := &httpBlobUpload{ - client: &http.Client{}, - } - - // Valid case - blobUpload.location = e + locationPath - n, err := blobUpload.ReadFrom(bytes.NewReader(b)) - if err != nil { - t.Fatalf("Error calling ReadFrom: %s", err) - } - if n != 64 { - t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) - } - - // Bad range - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when bad range received") - } - - // 404 - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if err != distribution.ErrBlobUploadUnknown { - t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown) - } - - // 400 valid json - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if uploadErr, ok := err.(errcode.Errors); !ok { - t.Fatalf("Wrong error type %T: %s", err, err) - } else if len(uploadErr) != 1 { - t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) - } else { - v2Err, ok := uploadErr[0].(errcode.Error) - if !ok { - t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) - } - if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { - t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) - } - if expected := "blob upload invalid"; v2Err.Message != expected { - t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) - } - if expected := "more detail"; v2Err.Detail.(string) != expected { - t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) - } - } - - // 400 invalid json - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { - t.Fatalf("Wrong error type %T: %s", err, err) - } else { - respStr := string(uploadErr.Response) - if expected := "something bad happened"; respStr != expected { - t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) - } - } - - // 500 - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { - t.Fatalf("Wrong error type %T: %s", err, err) - } else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { - t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) - } -} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go index a528a865..00fafe11 100644 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -2,6 +2,7 @@ package client import ( "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -10,6 +11,10 @@ import ( "github.com/docker/distribution/registry/api/errcode" ) +// ErrNoErrorsInBody is returned when a HTTP response body parses to an empty +// errcode.Errors slice. 
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") + // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. type UnexpectedHTTPStatusError struct { @@ -17,18 +22,19 @@ type UnexpectedHTTPStatusError struct { } func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) } // UnexpectedHTTPResponseError is returned when an expected HTTP status code // is returned, but the content was unexpected and failed to be parsed. type UnexpectedHTTPResponseError struct { - ParseErr error - Response []byte + ParseErr error + StatusCode int + Response []byte } func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) } func parseHTTPErrorResponse(statusCode int, r io.Reader) error { @@ -53,10 +59,22 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ - ParseErr: err, - Response: body, + ParseErr: err, + StatusCode: statusCode, + Response: body, } } + + if len(errors) == 0 { + // If there was no error specified in the body, return + // UnexpectedHTTPResponseError. + return &UnexpectedHTTPResponseError{ + ParseErr: ErrNoErrorsInBody, + StatusCode: statusCode, + Response: body, + } + } + return errors } diff --git a/vendor/github.com/docker/distribution/registry/client/errors_test.go b/vendor/github.com/docker/distribution/registry/client/errors_test.go deleted file mode 100644 index 80241a5a..00000000 --- a/vendor/github.com/docker/distribution/registry/client/errors_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package client - -import ( - "bytes" - "io" - "net/http" - "strings" - "testing" -) - -type nopCloser struct { - io.Reader -} - -func (nopCloser) Close() error { return nil } - -func TestHandleErrorResponse401ValidBody(t *testing.T) { - json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" - response := &http.Response{ - Status: "401 Unauthorized", - StatusCode: 401, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "unauthorized: action requires authentication" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { - json := "{invalid json}" - response := &http.Response{ - Status: "401 Unauthorized", - StatusCode: 401, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "unauthorized: authentication required" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { - json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" - response := &http.Response{ - Status: "400 Bad Request", - StatusCode: 400, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "digest invalid: provided digest does not match" - if 
!strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { - json := "{invalid json}" - response := &http.Response{ - Status: "404 Not Found", - StatusCode: 404, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "Error parsing HTTP response: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { - response := &http.Response{ - Status: "501 Not Implemented", - StatusCode: 501, - Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, - } - err := HandleErrorResponse(response) - - expectedMsg := "Received unexpected HTTP status: 501 Not Implemented" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index 1f777add..8cc5f7f9 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -27,16 +27,50 @@ type Registry interface { Repositories(ctx context.Context, repos []string, last string) (n int, err error) } +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. + hasValue := false + for _, existingVal := range req.Header[headerName] { + if existingVal == val { + hasValue = true + break + } + } + if !hasValue { + req.Header.Add(headerName, val) + } + } + } + } + + return nil +} + // NewRegistry creates a registry namespace which can be used to get a listing of repositories func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL) + ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, + Transport: transport, + Timeout: 1 * time.Minute, + CheckRedirect: checkHTTPRedirect, } return ®istry{ @@ -99,13 +133,14 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri // NewRepository creates a new Repository for the given repository name and base URL. 
func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL) + ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } client := &http.Client{ - Transport: transport, + Transport: transport, + CheckRedirect: checkHTTPRedirect, // TODO(dmcgowan): create cookie jar } @@ -124,7 +159,7 @@ type repository struct { name reference.Named } -func (r *repository) Name() reference.Named { +func (r *repository) Named() reference.Named { return r.name } @@ -157,7 +192,7 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService { client: r.client, ub: r.ub, context: r.context, - name: r.Name(), + name: r.Named(), } } @@ -257,19 +292,38 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er if err != nil { return distribution.Descriptor{}, err } - var attempts int - resp, err := t.client.Head(u) + req, err := http.NewRequest("HEAD", u, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + var attempts int + resp, err := t.client.Do(req) check: if err != nil { return distribution.Descriptor{}, err } + defer resp.Body.Close() switch { case resp.StatusCode >= 200 && resp.StatusCode < 400: return descriptorFromResponse(resp) case resp.StatusCode == http.StatusMethodNotAllowed: - resp, err = t.client.Get(u) + req, err = http.NewRequest("GET", u, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + resp, err = t.client.Do(req) attempts++ if attempts > 1 { return distribution.Descriptor{}, err @@ -348,9 +402,9 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis ) for _, option := range options { - if opt, ok := option.(withTagOption); ok { - digestOrTag = opt.tag - ref, err = reference.WithTag(ms.name, opt.tag) + if opt, ok := option.(distribution.WithTagOption); ok { + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) if err != nil { return nil, err } @@ -411,33 +465,20 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis return nil, HandleErrorResponse(resp) } -// WithTag allows a tag to be passed into Put which enables the client -// to build a correct URL. -func WithTag(tag string) distribution.ManifestServiceOption { - return withTagOption{tag} -} - -type withTagOption struct{ tag string } - -func (o withTagOption) Apply(m distribution.ManifestService) error { - if _, ok := m.(*manifests); ok { - return nil - } - return fmt.Errorf("withTagOption is a client-only option") -} - // Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. This state is written and read under a lock. +// tag name in order to build the correct upload URL. 
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { ref := ms.name + var tagged bool for _, option := range options { - if opt, ok := option.(withTagOption); ok { + if opt, ok := option.(distribution.WithTagOption); ok { var err error - ref, err = reference.WithTag(ref, opt.tag) + ref, err = reference.WithTag(ref, opt.Tag) if err != nil { return "", err } + tagged = true } else { err := option.Apply(ms) if err != nil { @@ -445,13 +486,24 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . } } } - - manifestURL, err := ms.ub.BuildManifestURL(ref) + mediaType, p, err := m.Payload() if err != nil { return "", err } - mediaType, p, err := m.Payload() + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) if err != nil { return "", err } diff --git a/vendor/github.com/docker/distribution/registry/client/repository_test.go b/vendor/github.com/docker/distribution/registry/client/repository_test.go deleted file mode 100644 index b7b782c7..00000000 --- a/vendor/github.com/docker/distribution/registry/client/repository_test.go +++ /dev/null @@ -1,1073 +0,0 @@ -package client - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" - "log" - "net/http" - "net/http/httptest" - "strconv" - "strings" - "testing" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/testutil" - "github.com/docker/distribution/uuid" - "github.com/docker/libtrust" -) - -func testServer(rrm testutil.RequestResponseMap) (string, func()) { - h := testutil.NewHandler(rrm) - s := httptest.NewServer(h) - return s.URL, s.Close -} - -func newRandomBlob(size int) (digest.Digest, []byte) { - b := make([]byte, size) - if n, err := rand.Read(b); err != nil { - panic(err) - } else if n != size { - panic("unable to read enough bytes") - } - - return digest.FromBytes(b), b -} - -func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) -} - -func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { - headers := map[string][]string{ - "Content-Length": {strconv.Itoa(len(content))}, - 
"Content-Type": {"application/json; charset=utf-8"}, - } - if link != "" { - headers["Link"] = append(headers["Link"], link) - } - - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: route, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(headers), - }, - }) -} - -func TestBlobDelete(t *testing.T) { - dgst, _ := newRandomBlob(1024) - var m testutil.RequestResponseMap - repo, _ := reference.ParseNamed("test.example.com/repo1") - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "DELETE", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - err = l.Delete(ctx, dgst) - if err != nil { - t.Errorf("Error deleting blob: %s", err.Error()) - } - -} - -func TestBlobFetch(t *testing.T) { - d1, b1 := newRandomBlob(1024) - var m testutil.RequestResponseMap - addTestFetch("test.example.com/repo1", d1, b1, &m) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - repo, _ := reference.ParseNamed("test.example.com/repo1") - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - b, err := l.Get(ctx, d1) - if err != nil { - t.Fatal(err) - } - if bytes.Compare(b, b1) != 0 { - t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) - } - - // TODO(dmcgowan): Test for unknown blob case -} - -func TestBlobExistsNoContentLength(t *testing.T) { - var m testutil.RequestResponseMap - - repo, _ := reference.ParseNamed("biff") - dgst, content := newRandomBlob(1024) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(map[string][]string{ - // "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - // "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - _, err = l.Stat(ctx, dgst) - if err == nil { - t.Fatal(err) - } - if !strings.Contains(err.Error(), "missing content-length heade") { - t.Fatalf("Expected missing content-length error message") - } - -} - -func TestBlobExists(t *testing.T) { - d1, b1 := newRandomBlob(1024) - var m testutil.RequestResponseMap - addTestFetch("test.example.com/repo1", d1, b1, &m) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - repo, _ := reference.ParseNamed("test.example.com/repo1") - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - stat, err := 
l.Stat(ctx, d1) - if err != nil { - t.Fatal(err) - } - - if stat.Digest != d1 { - t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) - } - - if stat.Size != int64(len(b1)) { - t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) - } - - // TODO(dmcgowan): Test error cases and ErrBlobUnknown case -} - -func TestBlobUploadChunked(t *testing.T) { - dgst, b1 := newRandomBlob(1024) - var m testutil.RequestResponseMap - chunks := [][]byte{ - b1[0:256], - b1[256:512], - b1[512:513], - b1[513:1024], - } - repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - uuids := []string{uuid.Generate().String()} - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + repo.Name() + "/blobs/uploads/", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, - "Docker-Upload-UUID": {uuids[0]}, - "Range": {"0-0"}, - }), - }, - }) - offset := 0 - for i, chunk := range chunks { - uuids = append(uuids, uuid.Generate().String()) - newOffset := offset + len(chunk) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PATCH", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], - Body: chunk, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, - "Docker-Upload-UUID": {uuids[i+1]}, - "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, - }), - }, - }) - offset = newOffset - } - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], - QueryParams: map[string][]string{ - "digest": {dgst.String()}, - }, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(offset)}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - upload, err := l.Create(ctx) - if err != nil { - t.Fatal(err) - } - - if upload.ID() != uuids[0] { - log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) - } - - for _, chunk := range chunks { - n, err := upload.Write(chunk) - if err != nil { - t.Fatal(err) - } - if n != len(chunk) { - t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) - } - } - - blob, err := upload.Commit(ctx, distribution.Descriptor{ - Digest: dgst, - Size: int64(len(b1)), - }) - if err != nil { - t.Fatal(err) - } - - if blob.Size != int64(len(b1)) { - t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) - } -} - -func TestBlobUploadMonolithic(t *testing.T) { - dgst, b1 := newRandomBlob(1024) - var m 
testutil.RequestResponseMap - repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - uploadID := uuid.Generate().String() - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + repo.Name() + "/blobs/uploads/", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, - "Docker-Upload-UUID": {uploadID}, - "Range": {"0-0"}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PATCH", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, - Body: b1, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, - "Docker-Upload-UUID": {uploadID}, - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, - QueryParams: map[string][]string{ - "digest": {dgst.String()}, - }, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(b1))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - upload, err := l.Create(ctx) - if err != nil { - t.Fatal(err) - } - - if upload.ID() != uploadID { - log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) - } - - n, err := upload.ReadFrom(bytes.NewReader(b1)) - if err != nil { - t.Fatal(err) - } - if n != int64(len(b1)) { - t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) - } - - blob, err := upload.Commit(ctx, distribution.Descriptor{ - Digest: dgst, - Size: int64(len(b1)), - }) - if err != nil { - t.Fatal(err) - } - - if blob.Size != int64(len(b1)) { - t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) - } -} - -func TestBlobMount(t *testing.T) { - dgst, content := newRandomBlob(1024) - var m testutil.RequestResponseMap - repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - - sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") - canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) - - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + repo.Name() + "/blobs/uploads/", - QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, - "Docker-Content-Digest": 
{dgst.String()}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - - l := r.Blobs(ctx) - - bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) - if bw != nil { - t.Fatalf("Expected blob writer to be nil, was %v", bw) - } - - if ebm, ok := err.(distribution.ErrBlobMounted); ok { - if ebm.From.Digest() != dgst { - t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) - } - if ebm.From.Name() != sourceRepo.Name() { - t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) - } - } else { - t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) - } -} - -func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { - blobs := make([]schema1.FSLayer, blobCount) - history := make([]schema1.History, blobCount) - - for i := 0; i < blobCount; i++ { - dgst, blob := newRandomBlob((i % 5) * 16) - - blobs[i] = schema1.FSLayer{BlobSum: dgst} - history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} - } - - m := schema1.Manifest{ - Name: name.String(), - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) - } - - sm, err := schema1.Sign(&m, pk) - if err != nil { - panic(err) - } - - return sm, digest.FromBytes(sm.Canonical), sm.Canonical -} - -func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { - actualDigest := digest.FromBytes(content) - getReqWithEtag := testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/manifests/" + reference, - Headers: http.Header(map[string][]string{ - "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, - }), - } - - var getRespWithEtag testutil.Response - if actualDigest.String() == dgst { - getRespWithEtag = testutil.Response{ - StatusCode: http.StatusNotModified, - Body: []byte{}, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeSignedManifest}, - }), - } - } else { - getRespWithEtag = testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeSignedManifest}, - }), - } - - } - *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) -} - -func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/manifests/" + reference, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: 
content, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {mediatype}, - }), - }, - }) - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/manifests/" + reference, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {mediatype}, - }), - }, - }) - -} - -func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { - if m1.Name != m2.Name { - return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) - } - if m1.Tag != m2.Tag { - return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) - } - if len(m1.FSLayers) != len(m2.FSLayers) { - return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) - } - for i := range m1.FSLayers { - if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { - return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) - } - } - if len(m1.History) != len(m2.History) { - return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) - } - for i := range m1.History { - if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { - return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) - } - } - return nil -} - -func TestV1ManifestFetch(t *testing.T) { - ctx := context.Background() - repo, _ := reference.ParseNamed("test.example.com/repo") - m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - var m testutil.RequestResponseMap - _, pl, err := m1.Payload() - if err != nil { - t.Fatal(err) - } - addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) - addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) - addTestManifest(repo, "badcontenttype", "text/html", pl, &m) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - ok, err := ms.Exists(ctx, dgst) - if err != nil { - t.Fatal(err) - } - if !ok { - t.Fatal("Manifest does not exist") - } - - manifest, err := ms.Get(ctx, dgst) - if err != nil { - t.Fatal(err) - } - v1manifest, ok := manifest.(*schema1.SignedManifest) - if !ok { - t.Fatalf("Unexpected manifest type from Get: %T", manifest) - } - - if err := checkEqualManifest(v1manifest, m1); err != nil { - t.Fatal(err) - } - - manifest, err = ms.Get(ctx, dgst, WithTag("latest")) - if err != nil { - t.Fatal(err) - } - v1manifest, ok = manifest.(*schema1.SignedManifest) - if !ok { - t.Fatalf("Unexpected manifest type from Get: %T", manifest) - } - - if err = checkEqualManifest(v1manifest, m1); err != nil { - t.Fatal(err) - } - - manifest, err = ms.Get(ctx, dgst, WithTag("badcontenttype")) - if err != nil { - t.Fatal(err) - } - v1manifest, ok = manifest.(*schema1.SignedManifest) - if !ok { - t.Fatalf("Unexpected manifest type from Get: %T", manifest) - } - - if err = checkEqualManifest(v1manifest, m1); err != nil { - t.Fatal(err) - } -} - -func TestManifestFetchWithEtag(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") - _, d1, p1 := 
newRandomSchemaV1Manifest(repo, "latest", 6) - var m testutil.RequestResponseMap - addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - clientManifestService, ok := ms.(*manifests) - if !ok { - panic("wrong type for client manifest service") - } - _, err = clientManifestService.Get(ctx, d1, WithTag("latest"), AddEtagToTag("latest", d1.String())) - if err != distribution.ErrManifestNotModified { - t.Fatal(err) - } -} - -func TestManifestDelete(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/delete") - _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - var m testutil.RequestResponseMap - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "DELETE", - Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ctx := context.Background() - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - if err := ms.Delete(ctx, dgst1); err != nil { - t.Fatal(err) - } - if err := ms.Delete(ctx, dgst2); err == nil { - t.Fatal("Expected error deleting unknown manifest") - } - // TODO(dmcgowan): Check for specific unknown error -} - -func TestManifestPut(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/delete") - m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) - - _, payload, err := m1.Payload() - if err != nil { - t.Fatal(err) - } - var m testutil.RequestResponseMap - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + repo.Name() + "/manifests/other", - Body: payload, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ctx := context.Background() - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - if _, err := ms.Put(ctx, m1, WithTag(m1.Tag)); err != nil { - t.Fatal(err) - } - - // TODO(dmcgowan): Check for invalid input error -} - -func TestManifestTags(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") - tagsList := []byte(strings.TrimSpace(` -{ - "name": "test.example.com/repo/tags/list", - "tags": [ - "tag1", - "tag2", - "funtag" - ] -} - `)) - var m testutil.RequestResponseMap - for i := 0; i < 3; i++ { - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/tags/list", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: tagsList, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(tagsList))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - } - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, 
nil) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - tagService := r.Tags(ctx) - - tags, err := tagService.All(ctx) - if err != nil { - t.Fatal(err) - } - if len(tags) != 3 { - t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) - } - - expected := map[string]struct{}{ - "tag1": {}, - "tag2": {}, - "funtag": {}, - } - for _, t := range tags { - delete(expected, t) - } - if len(expected) != 0 { - t.Fatalf("unexpected tags returned: %v", expected) - } - // TODO(dmcgowan): Check for error cases -} - -func TestManifestUnauthorized(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo") - _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - var m testutil.RequestResponseMap - - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusUnauthorized, - Body: []byte("garbage"), - }, - }) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ctx := context.Background() - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - _, err = ms.Get(ctx, dgst) - if err == nil { - t.Fatal("Expected error fetching manifest") - } - v2Err, ok := err.(errcode.Error) - if !ok { - t.Fatalf("Unexpected error type: %#v", err) - } - if v2Err.Code != errcode.ErrorCodeUnauthorized { - t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) - } - if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { - t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) - } -} - -func TestCatalog(t *testing.T) { - var m testutil.RequestResponseMap - addTestCatalog( - "/v2/_catalog?n=5", - []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) - - e, c := testServer(m) - defer c() - - entries := make([]string, 5) - - r, err := NewRegistry(context.Background(), e, nil) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - numFilled, err := r.Repositories(ctx, entries, "") - if err != io.EOF { - t.Fatal(err) - } - - if numFilled != 3 { - t.Fatalf("Got wrong number of repos") - } -} - -func TestCatalogInParts(t *testing.T) { - var m testutil.RequestResponseMap - addTestCatalog( - "/v2/_catalog?n=2", - []byte("{\"repositories\":[\"bar\", \"baz\"]}"), - "", &m) - addTestCatalog( - "/v2/_catalog?last=baz&n=2", - []byte("{\"repositories\":[\"foo\"]}"), - "", &m) - - e, c := testServer(m) - defer c() - - entries := make([]string, 2) - - r, err := NewRegistry(context.Background(), e, nil) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - numFilled, err := r.Repositories(ctx, entries, "") - if err != nil { - t.Fatal(err) - } - - if numFilled != 2 { - t.Fatalf("Got wrong number of repos") - } - - numFilled, err = r.Repositories(ctx, entries, "baz") - if err != io.EOF { - t.Fatal(err) - } - - if numFilled != 1 { - t.Fatalf("Got wrong number of repos") - } -} - -func TestSanitizeLocation(t *testing.T) { - for _, testcase := range []struct { - description string - location string - source string - expected string - err error - }{ - { - description: "ensure relative location correctly resolved", - location: "/v2/foo/baasdf", - source: "http://blahalaja.com/v1", - expected: "http://blahalaja.com/v2/foo/baasdf", - }, - { - description: "ensure parameters are preserved", - location: 
"/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", - source: "http://blahalaja.com/v1", - expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", - }, - { - description: "ensure new hostname overidden", - location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", - source: "http://blahalaja.com/v1", - expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", - }, - } { - fatalf := func(format string, args ...interface{}) { - t.Fatalf(testcase.description+": "+format, args...) - } - - s, err := sanitizeLocation(testcase.location, testcase.source) - if err != testcase.err { - if testcase.err != nil { - fatalf("expected error: %v != %v", err, testcase) - } else { - fatalf("unexpected error sanitizing: %v", err) - } - } - - if s != testcase.expected { - fatalf("bad sanitize: %q != %q", s, testcase.expected) - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go index b27b6c23..e1b17a03 100644 --- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -1,12 +1,22 @@ package transport import ( - "bufio" "errors" "fmt" "io" "net/http" "os" + "regexp" + "strconv" +) + +var ( + contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) + + // ErrWrongCodeForByteRange is returned if the client sends a request + // with a Range header but the server returns a 2xx or 3xx code other + // than 206 Partial Content. + ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") ) // ReadSeekCloser combines io.ReadSeeker with io.Closer. @@ -40,8 +50,6 @@ type httpReadSeeker struct { // rc is the remote read closer. rc io.ReadCloser - // brd is a buffer for internal buffered io. - brd *bufio.Reader // readerOffset tracks the offset as of the last read. readerOffset int64 // seekOffset allows Seek to override the offset. Seek changes @@ -58,7 +66,7 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { return 0, hrs.err } - // If we seeked to a different position, we need to reset the + // If we sought to a different position, we need to reset the // connection. This logic is here instead of Seek so that if // a seek is undone before the next read, the connection doesn't // need to be closed and reopened. A common example of this is @@ -79,11 +87,6 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { hrs.seekOffset += int64(n) hrs.readerOffset += int64(n) - // Simulate io.EOF error if we reach filesize. - if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { - err = io.EOF - } - return n, err } @@ -92,8 +95,18 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { return 0, hrs.err } + lastReaderOffset := hrs.readerOffset + + if whence == os.SEEK_SET && hrs.rc == nil { + // If no request has been made yet, and we are seeking to an + // absolute position, set the read offset as well to avoid an + // unnecessary request. 
+ hrs.readerOffset = offset + } + _, err := hrs.reader() if err != nil { + hrs.readerOffset = lastReaderOffset return 0, err } @@ -101,14 +114,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { case os.SEEK_CUR: - newOffset += int64(offset) + newOffset += offset case os.SEEK_END: if hrs.size < 0 { return 0, errors.New("content length not known") } - newOffset = hrs.size + int64(offset) + newOffset = hrs.size + offset case os.SEEK_SET: - newOffset = int64(offset) + newOffset = offset } if newOffset < 0 { @@ -131,7 +144,6 @@ func (hrs *httpReadSeeker) Close() error { } hrs.rc = nil - hrs.brd = nil hrs.err = errors.New("httpLayer: closed") @@ -154,7 +166,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.rc != nil { - return hrs.brd, nil + return hrs.rc, nil } req, err := http.NewRequest("GET", hrs.url, nil) @@ -163,10 +175,8 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.readerOffset > 0 { - // TODO(stevvooe): Get this working correctly. - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", "1-") + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) // TODO: get context in here // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } @@ -179,12 +189,55 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // Normally would use client.SuccessStatus, but that would be a cyclic // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - hrs.rc = resp.Body - if resp.StatusCode == http.StatusOK { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { hrs.size = resp.ContentLength } else { hrs.size = -1 } + hrs.rc = resp.Body } else { defer resp.Body.Close() if hrs.errorHandler != nil { @@ -193,11 +246,5 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } - if hrs.brd == nil { - hrs.brd = bufio.NewReader(hrs.rc) - } else { - hrs.brd.Reset(hrs.rc) - } - - return hrs.brd, nil + return hrs.rc, nil } diff --git 
a/vendor/github.com/docker/distribution/registry/doc.go b/vendor/github.com/docker/distribution/registry/doc.go deleted file mode 100644 index a1ba7f3a..00000000 --- a/vendor/github.com/docker/distribution/registry/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package registry provides the main entrypoints for running a registry. -package registry diff --git a/vendor/github.com/docker/distribution/registry/registry.go b/vendor/github.com/docker/distribution/registry/registry.go deleted file mode 100644 index 86cb6a17..00000000 --- a/vendor/github.com/docker/distribution/registry/registry.go +++ /dev/null @@ -1,337 +0,0 @@ -package registry - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "os" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/formatters/logstash" - "github.com/bugsnag/bugsnag-go" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/health" - "github.com/docker/distribution/registry/handlers" - "github.com/docker/distribution/registry/listener" - "github.com/docker/distribution/uuid" - "github.com/docker/distribution/version" - gorhandlers "github.com/gorilla/handlers" - "github.com/spf13/cobra" - "github.com/yvasiyarov/gorelic" -) - -// Cmd is a cobra command for running the registry. -var Cmd = &cobra.Command{ - Use: "registry ", - Short: "registry stores and distributes Docker images", - Long: "registry stores and distributes Docker images.", - Run: func(cmd *cobra.Command, args []string) { - if showVersion { - version.PrintVersion() - return - } - - // setup context - ctx := context.WithVersion(context.Background(), version.Version) - - config, err := resolveConfiguration(args) - if err != nil { - fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) - cmd.Usage() - os.Exit(1) - } - - if config.HTTP.Debug.Addr != "" { - go func(addr string) { - log.Infof("debug server listening %v", addr) - if err := http.ListenAndServe(addr, nil); err != nil { - log.Fatalf("error listening on debug interface: %v", err) - } - }(config.HTTP.Debug.Addr) - } - - registry, err := NewRegistry(ctx, config) - if err != nil { - log.Fatalln(err) - } - - if err = registry.ListenAndServe(); err != nil { - log.Fatalln(err) - } - }, -} - -var showVersion bool - -func init() { - Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") -} - -// A Registry represents a complete instance of the registry. -// TODO(aaronl): It might make sense for Registry to become an interface. -type Registry struct { - config *configuration.Configuration - app *handlers.App - server *http.Server -} - -// NewRegistry creates a new registry from a context and configuration struct. -func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { - var err error - ctx, err = configureLogging(ctx, config) - if err != nil { - return nil, fmt.Errorf("error configuring logger: %v", err) - } - - // inject a logger into the uuid library. warns us if there is a problem - // with uuid generation under low entropy. - uuid.Loggerf = context.GetLogger(ctx).Warnf - - app := handlers.NewApp(ctx, config) - // TODO(aaronl): The global scope of the health checks means NewRegistry - // can only be called once per process. 
- app.RegisterHealthChecks() - handler := configureReporting(app) - handler = alive("/", handler) - handler = health.Handler(handler) - handler = panicHandler(handler) - handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) - - server := &http.Server{ - Handler: handler, - } - - return &Registry{ - app: app, - config: config, - server: server, - }, nil -} - -// ListenAndServe runs the registry's HTTP server. -func (registry *Registry) ListenAndServe() error { - config := registry.config - - ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) - if err != nil { - return err - } - - if config.HTTP.TLS.Certificate != "" { - tlsConf := &tls.Config{ - ClientAuth: tls.NoClientCert, - NextProtos: []string{"http/1.1"}, - Certificates: make([]tls.Certificate, 1), - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - }, - } - - tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) - if err != nil { - return err - } - - if len(config.HTTP.TLS.ClientCAs) != 0 { - pool := x509.NewCertPool() - - for _, ca := range config.HTTP.TLS.ClientCAs { - caPem, err := ioutil.ReadFile(ca) - if err != nil { - return err - } - - if ok := pool.AppendCertsFromPEM(caPem); !ok { - return fmt.Errorf("Could not add CA to pool") - } - } - - for _, subj := range pool.Subjects() { - context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) - } - - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = pool - } - - ln = tls.NewListener(ln, tlsConf) - context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) - } else { - context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) - } - - return registry.server.Serve(ln) -} - -func configureReporting(app *handlers.App) http.Handler { - var handler http.Handler = app - - if app.Config.Reporting.Bugsnag.APIKey != "" { - bugsnagConfig := bugsnag.Configuration{ - APIKey: app.Config.Reporting.Bugsnag.APIKey, - // TODO(brianbland): provide the registry version here - // AppVersion: "2.0", - } - if app.Config.Reporting.Bugsnag.ReleaseStage != "" { - bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage - } - if app.Config.Reporting.Bugsnag.Endpoint != "" { - bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint - } - bugsnag.Configure(bugsnagConfig) - - handler = bugsnag.Handler(handler) - } - - if app.Config.Reporting.NewRelic.LicenseKey != "" { - agent := gorelic.NewAgent() - agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey - if app.Config.Reporting.NewRelic.Name != "" { - agent.NewrelicName = app.Config.Reporting.NewRelic.Name - } - agent.CollectHTTPStat = true - agent.Verbose = app.Config.Reporting.NewRelic.Verbose - agent.Run() - - handler = agent.WrapHTTPHandler(handler) - } - - return handler -} - -// configureLogging prepares the context with a logger using the -// configuration. -func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { - if config.Log.Level == "" && config.Log.Formatter == "" { - // If no config for logging is set, fallback to deprecated "Loglevel". 
- log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx)) - return ctx, nil - } - - log.SetLevel(logLevel(config.Log.Level)) - - formatter := config.Log.Formatter - if formatter == "" { - formatter = "text" // default formatter - } - - switch formatter { - case "json": - log.SetFormatter(&log.JSONFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "text": - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "logstash": - log.SetFormatter(&logstash.LogstashFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - default: - // just let the library use default on empty string. - if config.Log.Formatter != "" { - return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) - } - } - - if config.Log.Formatter != "" { - log.Debugf("using %q logging formatter", config.Log.Formatter) - } - - if len(config.Log.Fields) > 0 { - // build up the static fields, if present. - var fields []interface{} - for k := range config.Log.Fields { - fields = append(fields, k) - } - - ctx = context.WithValues(ctx, config.Log.Fields) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) - } - - return ctx, nil -} - -func logLevel(level configuration.Loglevel) log.Level { - l, err := log.ParseLevel(string(level)) - if err != nil { - l = log.InfoLevel - log.Warnf("error parsing level %q: %v, using %q ", level, err, l) - } - - return l -} - -// panicHandler add a HTTP handler to web app. The handler recover the happening -// panic. logrus.Panic transmits panic message to pre-config log hooks, which is -// defined in config.yml. -func panicHandler(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer func() { - if err := recover(); err != nil { - log.Panic(fmt.Sprintf("%v", err)) - } - }() - handler.ServeHTTP(w, r) - }) -} - -// alive simply wraps the handler with a route that always returns an http 200 -// response when the path is matched. If the path is not matched, the request -// is passed to the provided handler. There is no guarantee of anything but -// that the server is up. Wrap with other handlers (such as health.Handler) -// for greater affect. 
-func alive(path string, handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == path { - w.Header().Set("Cache-Control", "no-cache") - w.WriteHeader(http.StatusOK) - return - } - - handler.ServeHTTP(w, r) - }) -} - -func resolveConfiguration(args []string) (*configuration.Configuration, error) { - var configurationPath string - - if len(args) > 0 { - configurationPath = args[0] - } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { - configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") - } - - if configurationPath == "" { - return nil, fmt.Errorf("configuration path unspecified") - } - - fp, err := os.Open(configurationPath) - if err != nil { - return nil, err - } - - defer fp.Close() - - config, err := configuration.Parse(fp) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) - } - - return config, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blob_test.go b/vendor/github.com/docker/distribution/registry/storage/blob_test.go deleted file mode 100644 index 246648b0..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/blob_test.go +++ /dev/null @@ -1,568 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" -) - -// TestSimpleBlobUpload covers the blob upload process, exercising common -// error paths that might be seen during an upload. -func TestSimpleBlobUpload(t *testing.T) { - randomDataReader, dgst, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random reader: %v", err) - } - - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs := repository.Blobs(ctx) - - h := sha256.New() - rd := io.TeeReader(randomDataReader, h) - - blobUpload, err := bs.Create(ctx) - - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Cancel the upload then restart it - if err := blobUpload.Cancel(ctx); err != nil { - t.Fatalf("unexpected error during upload cancellation: %v", err) - } - - // Do a resume, get unknown upload - blobUpload, err = bs.Resume(ctx, blobUpload.ID()) - if err != distribution.ErrBlobUploadUnknown { - t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) - } - - // Restart! 
- blobUpload, err = bs.Create(ctx) - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Get the size of our random tarfile - randomDataSize, err := seekerSize(randomDataReader) - if err != nil { - t.Fatalf("error getting seeker size of random data: %v", err) - } - - nn, err := io.Copy(blobUpload, rd) - if err != nil { - t.Fatalf("unexpected error uploading layer data: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("layer data write incomplete") - } - - offset, err := blobUpload.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("unexpected error seeking layer upload: %v", err) - } - - if offset != nn { - t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) - } - blobUpload.Close() - - // Do a resume, for good fun - blobUpload, err = bs.Resume(ctx, blobUpload.ID()) - if err != nil { - t.Fatalf("unexpected error resuming upload: %v", err) - } - - sha256Digest := digest.NewDigest("sha256", h) - desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) - if err != nil { - t.Fatalf("unexpected error finishing layer upload: %v", err) - } - - // After finishing an upload, it should no longer exist. - if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { - t.Fatalf("expected layer upload to be unknown, got %v", err) - } - - // Test for existence. - statDesc, err := bs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) - } - - if statDesc != desc { - t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) - } - - rc, err := bs.Open(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error opening blob for read: %v", err) - } - defer rc.Close() - - h.Reset() - nn, err = io.Copy(h, rc) - if err != nil { - t.Fatalf("error reading layer: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("incorrect read length") - } - - if digest.NewDigest("sha256", h) != sha256Digest { - t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) - } - - // Delete a blob - err = bs.Delete(ctx, desc.Digest) - if err != nil { - t.Fatalf("Unexpected error deleting blob") - } - - d, err := bs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating deleted blob: %v", d) - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) - } - - _, err = bs.Open(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected success opening deleted blob for read") - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type getting deleted manifest: %#v", err) - } - - // Re-upload the blob - randomBlob, err := ioutil.ReadAll(randomDataReader) - if err != nil { - t.Fatalf("Error reading all of blob %s", err.Error()) - } - expectedDigest := digest.FromBytes(randomBlob) - simpleUpload(t, bs, randomBlob, expectedDigest) - - d, err = bs.Stat(ctx, expectedDigest) - if err != nil { - t.Errorf("unexpected error stat-ing blob") - } - if d.Digest != expectedDigest { - t.Errorf("Mismatching digest with restored blob") - } - - _, err = bs.Open(ctx, expectedDigest) - if err != nil { - t.Errorf("Unexpected error opening blob") - } - - // Reuse state to test delete with a delete-disabled registry - registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) - if err != nil { - 
t.Fatalf("error creating registry: %v", err) - } - repository, err = registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs = repository.Blobs(ctx) - err = bs.Delete(ctx, desc.Digest) - if err == nil { - t.Errorf("Unexpected success deleting while disabled") - } -} - -// TestSimpleBlobRead just creates a simple blob file and ensures that basic -// open, read, seek, read works. More specific edge cases should be covered in -// other tests. -func TestSimpleBlobRead(t *testing.T) { - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs := repository.Blobs(ctx) - - randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. - if err != nil { - t.Fatalf("error creating random data: %v", err) - } - - // Test for existence. - desc, err := bs.Stat(ctx, dgst) - if err != distribution.ErrBlobUnknown { - t.Fatalf("expected not found error when testing for existence: %v", err) - } - - rc, err := bs.Open(ctx, dgst) - if err != distribution.ErrBlobUnknown { - t.Fatalf("expected not found error when opening non-existent blob: %v", err) - } - - randomLayerSize, err := seekerSize(randomLayerReader) - if err != nil { - t.Fatalf("error getting seeker size for random layer: %v", err) - } - - descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize} - t.Logf("desc: %v", descBefore) - - desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) - if err != nil { - t.Fatalf("error adding blob to blobservice: %v", err) - } - - if desc.Size != randomLayerSize { - t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) - } - - rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. 
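TestSimpleBlobUpload verifies round-trip integrity by hashing the blob while streaming it back and comparing against the digest computed at upload time. The same verify-on-read pattern, reduced to the standard library:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	blob := []byte("example blob content")
	want := sha256.Sum256(blob)

	// Hash while reading, as the deleted tests do with io.Copy into a
	// sha256 hash, then compare against the expected digest.
	h := sha256.New()
	if _, err := io.Copy(h, bytes.NewReader(blob)); err != nil {
		panic(err)
	}
	if !bytes.Equal(h.Sum(nil), want[:]) {
		fmt.Println("digest mismatch")
		return
	}
	fmt.Println("digest verified")
}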
- if err != nil { - t.Fatalf("error opening blob with %v: %v", dgst, err) - } - defer rc.Close() - - // Now check the sha digest and ensure its the same - h := sha256.New() - nn, err := io.Copy(h, rc) - if err != nil { - t.Fatalf("unexpected error copying to hash: %v", err) - } - - if nn != randomLayerSize { - t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize) - } - - sha256Digest := digest.NewDigest("sha256", h) - if sha256Digest != desc.Digest { - t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest) - } - - // Now seek back the blob, read the whole thing and check against randomLayerData - offset, err := rc.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error seeking blob: %v", err) - } - - if offset != 0 { - t.Fatalf("seek failed: expected 0 offset, got %d", offset) - } - - p, err := ioutil.ReadAll(rc) - if err != nil { - t.Fatalf("error reading all of blob: %v", err) - } - - if len(p) != int(randomLayerSize) { - t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize) - } - - // Reset the randomLayerReader and read back the buffer - _, err = randomLayerReader.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error resetting layer reader: %v", err) - } - - randomLayerData, err := ioutil.ReadAll(randomLayerReader) - if err != nil { - t.Fatalf("random layer read failed: %v", err) - } - - if !bytes.Equal(p, randomLayerData) { - t.Fatalf("layer data not equal") - } -} - -// TestBlobMount covers the blob mount process, exercising common -// error paths that might be seen during a mount. -func TestBlobMount(t *testing.T) { - randomDataReader, dgst, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random reader: %v", err) - } - - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - sourceImageName, _ := reference.ParseNamed("foo/source") - driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - sourceRepository, err := registry.Repository(ctx, sourceImageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - - sbs := sourceRepository.Blobs(ctx) - - blobUpload, err := sbs.Create(ctx) - - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Get the size of our random tarfile - randomDataSize, err := seekerSize(randomDataReader) - if err != nil { - t.Fatalf("error getting seeker size of random data: %v", err) - } - - nn, err := io.Copy(blobUpload, randomDataReader) - if err != nil { - t.Fatalf("unexpected error uploading layer data: %v", err) - } - - desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) - if err != nil { - t.Fatalf("unexpected error finishing layer upload: %v", err) - } - - // Test for existence. - statDesc, err := sbs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) - } - - if statDesc != desc { - t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) - } - - bs := repository.Blobs(ctx) - // Test destination for existence. 
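TestBlobMount exercises cross-repository mounting: instead of re-uploading a blob that already exists in a source repository, the client asks the destination blob store to link it. A hedged sketch of that client-side flow, assuming the vendored distribution packages are importable and that WithMountFrom is exported from registry/storage, as the deleted test implies:

package mountexample

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
)

// mountBlob links an existing blob from source into dest without
// re-uploading it. Per the deleted test, a successful mount surfaces as an
// ErrBlobMounted "error" carrying the descriptor, not as a blob writer.
func mountBlob(ctx context.Context, dest distribution.BlobStore, source reference.Named, dgst digest.Digest) (distribution.Descriptor, error) {
	ref, err := reference.WithDigest(source, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	_, err = dest.Create(ctx, storage.WithMountFrom(ref))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		return ebm.Descriptor, nil // mounted: the blob is now visible in dest
	}
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return distribution.Descriptor{}, fmt.Errorf("expected a mount, got a writer")
}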
- statDesc, err = bs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) - } - - canonicalRef, err := reference.WithDigest(sourceRepository.Name(), desc.Digest) - if err != nil { - t.Fatal(err) - } - - bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) - if bw != nil { - t.Fatal("unexpected blobwriter returned from Create call, should mount instead") - } - - ebm, ok := err.(distribution.ErrBlobMounted) - if !ok { - t.Fatalf("unexpected error mounting layer: %v", err) - } - - if ebm.Descriptor != desc { - t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) - } - - // Test for existence. - statDesc, err = bs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) - } - - if statDesc != desc { - t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) - } - - rc, err := bs.Open(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error opening blob for read: %v", err) - } - defer rc.Close() - - h := sha256.New() - nn, err = io.Copy(h, rc) - if err != nil { - t.Fatalf("error reading layer: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("incorrect read length") - } - - if digest.NewDigest("sha256", h) != dgst { - t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) - } - - // Delete the blob from the source repo - err = sbs.Delete(ctx, desc.Digest) - if err != nil { - t.Fatalf("Unexpected error deleting blob") - } - - d, err := bs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) - } - - d, err = sbs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating deleted blob: %v", d) - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) - } - - // Delete the blob from the dest repo - err = bs.Delete(ctx, desc.Digest) - if err != nil { - t.Fatalf("Unexpected error deleting blob") - } - - d, err = bs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating deleted blob: %v", d) - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) - } -} - -// TestLayerUploadZeroLength uploads zero-length -func TestLayerUploadZeroLength(t *testing.T) { - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs := repository.Blobs(ctx) - - simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) -} - -func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { - ctx := context.Background() - wr, err := bs.Create(ctx) - if err != nil { - t.Fatalf("unexpected error starting upload: %v", err) - } - - nn, err := io.Copy(wr, bytes.NewReader(blob)) - if err != nil { - t.Fatalf("error copying into blob writer: %v", err) - } - - if nn != 0 { - t.Fatalf("unexpected number of bytes copied: %v > 0", nn) - } - - dgst, err := digest.FromReader(bytes.NewReader(blob)) - if err != nil { - 
t.Fatalf("error getting digest: %v", err) - } - - if dgst != expectedDigest { - // sanity check on zero digest - t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest) - } - - desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) - if err != nil { - t.Fatalf("unexpected error committing write: %v", err) - } - - if desc.Digest != dgst { - t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst) - } -} - -// seekerSize seeks to the end of seeker, checks the size and returns it to -// the original state, returning the size. The state of the seeker should be -// treated as unknown if an error is returned. -func seekerSize(seeker io.ReadSeeker) (int64, error) { - current, err := seeker.Seek(0, os.SEEK_CUR) - if err != nil { - return 0, err - } - - end, err := seeker.Seek(0, os.SEEK_END) - if err != nil { - return 0, err - } - - resumed, err := seeker.Seek(current, os.SEEK_SET) - if err != nil { - return 0, err - } - - if resumed != current { - return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location") - } - - return end, nil -} - -// addBlob simply consumes the reader and inserts into the blob service, -// returning a descriptor on success. -func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) { - wr, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - defer wr.Cancel(ctx) - - if nn, err := io.Copy(wr, rd); err != nil { - return distribution.Descriptor{}, err - } else if nn != desc.Size { - return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size) - } - - return wr.Commit(ctx, desc) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go deleted file mode 100644 index fad0a77a..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go +++ /dev/null @@ -1,60 +0,0 @@ -package storage - -import ( - "expvar" - "sync/atomic" - - "github.com/docker/distribution/registry/storage/cache" -) - -type blobStatCollector struct { - metrics cache.Metrics -} - -func (bsc *blobStatCollector) Hit() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Hits, 1) -} - -func (bsc *blobStatCollector) Miss() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Misses, 1) -} - -func (bsc *blobStatCollector) Metrics() cache.Metrics { - return bsc.metrics -} - -// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor -// cache requests. Note this is kept globally and made available via expvar. -// For more detailed metrics, its recommend to instrument a particular cache -// implementation. 
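The deleted seekerSize helper measures an io.ReadSeeker without consuming it: record the current offset, seek to the end to learn the size, then seek back. A self-contained sketch of the same technique:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// size measures a ReadSeeker non-destructively: remember the current
// position, seek to the end to learn the total size, then restore the
// original position before returning.
func size(s io.ReadSeeker) (int64, error) {
	cur, err := s.Seek(0, os.SEEK_CUR)
	if err != nil {
		return 0, err
	}
	end, err := s.Seek(0, os.SEEK_END)
	if err != nil {
		return 0, err
	}
	if _, err := s.Seek(cur, os.SEEK_SET); err != nil {
		return 0, err
	}
	return end, nil
}

func main() {
	r := strings.NewReader("twelve bytes")
	n, err := size(r)
	fmt.Println(n, err) // 12 <nil>
}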
-var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly. - return blobStatterCacheMetrics - })) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobserver.go b/vendor/github.com/docker/distribution/registry/storage/blobserver.go deleted file mode 100644 index 2655e011..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobserver.go +++ /dev/null @@ -1,78 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// TODO(stevvooe): This should configurable in the future. -const blobCacheControlMaxAge = 365 * 24 * time.Hour - -// blobServer simply serves blobs from a driver instance using a path function -// to identify paths and a descriptor service to fill in metadata. -type blobServer struct { - driver driver.StorageDriver - statter distribution.BlobStatter - pathFn func(dgst digest.Digest) (string, error) - redirect bool // allows disabling URLFor redirects -} - -func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return err - } - - path, err := bs.pathFn(desc.Digest) - if err != nil { - return err - } - - if bs.redirect { - redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - switch err.(type) { - case nil: - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) - return err - - case driver.ErrUnsupportedMethod: - // Fallback to serving the content directly. - default: - // Some unexpected error. - return err - } - } - - br, err := newFileReader(ctx, bs.driver, path, desc.Size) - if err != nil { - return err - } - defer br.Close() - - w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) - - if w.Header().Get("Docker-Content-Digest") == "" { - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - } - - if w.Header().Get("Content-Type") == "" { - // Set the content type if not already set. - w.Header().Set("Content-Type", desc.MediaType) - } - - if w.Header().Get("Content-Length") == "" { - // Set the content length if not already set. 
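The init function above publishes cache metrics through nested expvar maps (registry, then cache, then storage, then blobdescriptor), reusing any map that is already registered. A simplified sketch of that get-or-create pattern:

package main

import (
	"expvar"
	"fmt"
)

// getOrCreateMap reuses an existing published expvar.Map when present,
// otherwise creates and registers a new one, as the deleted init does at
// each level of its hierarchy.
func getOrCreateMap(parent *expvar.Map, name string) *expvar.Map {
	if v := parent.Get(name); v != nil {
		return v.(*expvar.Map)
	}
	m := &expvar.Map{}
	m.Init()
	parent.Set(name, m)
	return m
}

func main() {
	// The deleted init also tolerates a pre-existing top-level map via
	// expvar.Get; a fresh program can simply create it.
	registry := expvar.NewMap("registry")
	cache := getOrCreateMap(registry, "cache")
	storage := getOrCreateMap(cache, "storage")
	storage.Set("blobdescriptor", expvar.Func(func() interface{} {
		return map[string]uint64{"requests": 0, "hits": 0, "misses": 0}
	}))
	fmt.Println(registry.String())
}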
- w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) - } - - http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go deleted file mode 100644 index f8fe23fe..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobstore.go +++ /dev/null @@ -1,192 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// blobStore implements the read side of the blob store interface over a -// driver without enforcing per-repository membership. This object is -// intentionally a leaky abstraction, providing utility methods that support -// creating and traversing backend links. -type blobStore struct { - driver driver.StorageDriver - statter distribution.BlobStatter -} - -var _ distribution.BlobProvider = &blobStore{} - -// Get implements the BlobReadService.Get call. -func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - bp, err := bs.path(dgst) - if err != nil { - return nil, err - } - - p, err := bs.driver.GetContent(ctx, bp) - if err != nil { - switch err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUnknown - } - - return nil, err - } - - return p, err -} - -func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return nil, err - } - - path, err := bs.path(desc.Digest) - if err != nil { - return nil, err - } - - return newFileReader(ctx, bs.driver, path, desc.Size) -} - -// Put stores the content p in the blob store, calculating the digest. If the -// content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. This implemented as a convenience for other Put implementations -func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - desc, err := bs.statter.Stat(ctx, dgst) - if err == nil { - // content already present - return desc, nil - } else if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err) - // real error, return it - return distribution.Descriptor{}, err - } - - bp, err := bs.path(dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype here, as well. - - return distribution.Descriptor{ - Size: int64(len(p)), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - }, bs.driver.PutContent(ctx, bp, p) -} - -// path returns the canonical path for the blob identified by digest. The blob -// may or may not exist. -func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return "", err - } - - return bp, nil -} - -// link links the path to the provided digest by writing the digest into the -// target file. Caller must ensure that the blob actually exists. 
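The deleted blobServer sets a strong ETag keyed on the digest and a year-long max-age, which is safe because content-addressed blobs never change, then delegates range and conditional-request handling to http.ServeContent. A reduced sketch of that header discipline:

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// serveBlob applies the same caching headers as the deleted ServeBlob:
// digest-keyed ETag, long max-age for immutable content, and ServeContent
// for If-None-Match and Range support.
func serveBlob(w http.ResponseWriter, r *http.Request, digest, content string) {
	const maxAge = 365 * 24 * time.Hour
	w.Header().Set("ETag", fmt.Sprintf(`"%s"`, digest))
	w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", maxAge.Seconds()))
	if w.Header().Get("Content-Type") == "" {
		w.Header().Set("Content-Type", "application/octet-stream")
	}
	http.ServeContent(w, r, digest, time.Time{}, strings.NewReader(content))
}

func main() {
	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		serveBlob(w, r, "sha256:0000", "hello, blob") // illustrative digest
	})
	panic(http.ListenAndServe(":8080", nil))
}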
-func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { - // The contents of the "link" file are the exact string contents of the - // digest, which is specified in that package. - return bs.driver.PutContent(ctx, path, []byte(dgst)) -} - -// readlink returns the linked digest at path. -func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(ctx, path) - if err != nil { - return "", err - } - - linked, err := digest.ParseDigest(string(content)) - if err != nil { - return "", err - } - - return linked, nil -} - -// resolve reads the digest link at path and returns the blob store path. -func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { - dgst, err := bs.readlink(ctx, path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - -type blobStatter struct { - driver driver.StorageDriver -} - -var _ distribution.BlobDescriptorService = &blobStatter{} - -// Stat implements BlobStatter.Stat by returning the descriptor for the blob -// in the main blob store. If this method returns successfully, there is -// strong guarantee that the blob exists and is available. -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - path, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return distribution.Descriptor{}, err - } - - fi, err := bs.driver.Stat(ctx, path) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return distribution.Descriptor{}, distribution.ErrBlobUnknown - default: - return distribution.Descriptor{}, err - } - } - - if fi.IsDir() { - // NOTE(stevvooe): This represents a corruption situation. Somehow, we - // calculated a blob path and then detected a directory. We log the - // error and then error on the side of not knowing about the blob. - context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - // TODO(stevvooe): Add method to resolve the mediatype. We can store and - // cache a "global" media type for the blob, even if a specific repo has a - // mediatype that overrides the main one. - - return distribution.Descriptor{ - Size: fi.Size(), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. 
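The link and readlink methods above implement the repository-membership scheme: a per-repository "link" file whose entire content is the digest string it points at. A sketch with the local filesystem standing in for the storage driver:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeLink records membership by writing the digest string as the whole
// file content, exactly as the deleted blobStore.link does via PutContent.
func writeLink(path, dgst string) error {
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	return ioutil.WriteFile(path, []byte(dgst), 0644)
}

// readLink recovers the digest by reading the file back, as readlink does.
func readLink(path string) (string, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	p := filepath.Join(os.TempDir(), "repo", "_layers", "sha256", "abc", "link")
	if err := writeLink(p, "sha256:abc"); err != nil {
		panic(err)
	}
	d, _ := readLink(p)
	fmt.Println("linked digest:", d)
}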
- MediaType: "application/octet-stream", - Digest: dgst, - }, nil -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return distribution.ErrUnsupported -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go deleted file mode 100644 index e485cc6d..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go +++ /dev/null @@ -1,380 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "io" - "path" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var ( - errResumableDigestNotAvailable = errors.New("resumable digest not available") -) - -// layerWriter is used to control the various aspects of resumable -// layer upload. It implements the LayerUpload interface. -type blobWriter struct { - blobStore *linkedBlobStore - - id string - startedAt time.Time - digester digest.Digester - written int64 // track the contiguous write - - // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy - // LayerUpload Interface - bufferedFileWriter - - resumableDigestEnabled bool -} - -var _ distribution.BlobWriter = &blobWriter{} - -// ID returns the identifier for this upload. -func (bw *blobWriter) ID() string { - return bw.id -} - -func (bw *blobWriter) StartedAt() time.Time { - return bw.startedAt -} - -// Commit marks the upload as completed, returning a valid descriptor. The -// final size and digest are checked against the first descriptor provided. -func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - context.GetLogger(ctx).Debug("(*blobWriter).Commit") - - if err := bw.bufferedFileWriter.Close(); err != nil { - return distribution.Descriptor{}, err - } - - canonical, err := bw.validateBlob(ctx, desc) - if err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.moveBlob(ctx, canonical); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.removeResources(ctx); err != nil { - return distribution.Descriptor{}, err - } - - err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) - if err != nil { - return distribution.Descriptor{}, err - } - - return canonical, nil -} - -// Rollback the blob upload process, releasing any resources associated with -// the writer and canceling the operation. -func (bw *blobWriter) Cancel(ctx context.Context) error { - context.GetLogger(ctx).Debug("(*blobWriter).Rollback") - if err := bw.removeResources(ctx); err != nil { - return err - } - - bw.Close() - return nil -} - -func (bw *blobWriter) Write(p []byte) (int, error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. 
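The Write and ReadFrom bodies that follow tee every byte into both the file writer and the digester, so the tracked digest always matches exactly what has been persisted. The same pattern with the standard library, on the write side via io.MultiWriter and on the read side via io.TeeReader:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	var file bytes.Buffer // stands in for the driver-backed file writer
	h := sha256.New()

	// Write path: every chunk goes to the file and the hash at once.
	w := io.MultiWriter(&file, h)
	if _, err := w.Write([]byte("layer data")); err != nil {
		panic(err)
	}

	// ReadFrom path: tee the source into the hash while the file consumes it.
	var file2 bytes.Buffer
	h2 := sha256.New()
	src := io.TeeReader(bytes.NewReader([]byte("layer data")), h2)
	if _, err := io.Copy(&file2, src); err != nil {
		panic(err)
	}

	fmt.Println(bytes.Equal(h.Sum(nil), h2.Sum(nil))) // true
}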
- if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p) - bw.written += int64(n) - - return n, err -} - -func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) - bw.written += nn - - return nn, err -} - -func (bw *blobWriter) Close() error { - if bw.err != nil { - return bw.err - } - - if err := bw.storeHashState(bw.blobStore.ctx); err != nil { - return err - } - - return bw.bufferedFileWriter.Close() -} - -// validateBlob checks the data against the digest, returning an error if it -// does not match. The canonical descriptor is returned. -func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - var ( - verified, fullHash bool - canonical digest.Digest - ) - - if desc.Digest == "" { - // if no descriptors are provided, we have nothing to validate - // against. We don't really want to support this for the registry. - return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Reason: fmt.Errorf("cannot validate against empty digest"), - } - } - - // Stat the on disk file - if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): We really don't care if the file is - // not actually present for the reader. We now assume - // that the desc length is zero. - desc.Size = 0 - default: - // Any other error we want propagated up the stack. - return distribution.Descriptor{}, err - } - } else { - if fi.IsDir() { - return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) - } - - bw.size = fi.Size() - } - - if desc.Size > 0 { - if desc.Size != bw.size { - return distribution.Descriptor{}, distribution.ErrBlobInvalidLength - } - } else { - // if provided 0 or negative length, we can assume caller doesn't know or - // care about length. - desc.Size = bw.size - } - - // TODO(stevvooe): This section is very meandering. Need to be broken down - // to be a lot more clear. - - if err := bw.resumeDigestAt(ctx, bw.size); err == nil { - canonical = bw.digester.Digest() - - if canonical.Algorithm() == desc.Digest.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = desc.Digest == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. - fullHash = true - } - } else if err == errResumableDigestNotAvailable { - // Not using resumable digests, so we need to hash the entire layer. - fullHash = true - } else { - return distribution.Descriptor{}, err - } - - if fullHash { - // a fantastic optimization: if the the written data and the size are - // the same, we don't need to read the data from the backend. 
This is - // because we've written the entire file in the lifecycle of the - // current instance. - if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() { - canonical = bw.digester.Digest() - verified = desc.Digest == canonical - } - - // If the check based on size fails, we fall back to the slowest of - // paths. We may be able to make the size-based check a stronger - // guarantee, so this may be defensive. - if !verified { - digester := digest.Canonical.New() - - digestVerifier, err := digest.NewDigestVerifier(desc.Digest) - if err != nil { - return distribution.Descriptor{}, err - } - - // Read the file from the backend driver and validate it. - fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size) - if err != nil { - return distribution.Descriptor{}, err - } - defer fr.Close() - - tr := io.TeeReader(fr, digester.Hash()) - - if _, err := io.Copy(digestVerifier, tr); err != nil { - return distribution.Descriptor{}, err - } - - canonical = digester.Digest() - verified = digestVerifier.Verified() - } - } - - if !verified { - context.GetLoggerWithFields(ctx, - map[interface{}]interface{}{ - "canonical": canonical, - "provided": desc.Digest, - }, "canonical", "provided"). - Errorf("canonical digest does match provided digest") - return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Digest: desc.Digest, - Reason: fmt.Errorf("content does not match digest"), - } - } - - // update desc with canonical hash - desc.Digest = canonical - - if desc.MediaType == "" { - desc.MediaType = "application/octet-stream" - } - - return desc, nil -} - -// moveBlob moves the data into its final, hash-qualified destination, -// identified by dgst. The layer should be validated before commencing the -// move. -func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { - blobPath, err := pathFor(blobDataPathSpec{ - digest: desc.Digest, - }) - - if err != nil { - return err - } - - // Check for existence - if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // ensure that it doesn't exist. - default: - return err - } - } else { - // If the path exists, we can assume that the content has already - // been uploaded, since the blob storage is content-addressable. - // While it may be corrupted, detection of such corruption belongs - // elsewhere. - return nil - } - - // If no data was received, we may not actually have a file on disk. Check - // the size here and write a zero-length file to blobPath if this is the - // case. For the most part, this should only ever happen with zero-length - // tars. - if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // HACK(stevvooe): This is slightly dangerous: if we verify above, - // get a hash, then the underlying file is deleted, we risk moving - // a zero-length blob into a nonzero-length blob location. To - // prevent this horrid thing, we employ the hack of only allowing - // to this happen for the digest of an empty tar. - if desc.Digest == digest.DigestSha256EmptyTar { - return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) - } - - // We let this fail during the move below. - logrus. - WithField("upload.id", bw.ID()). 
- WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") - default: - return err // unrelated error - } - } - - // TODO(stevvooe): We should also write the mediatype when executing this move. - - return bw.blobStore.driver.Move(ctx, bw.path, blobPath) -} - -// removeResources should clean up all resources associated with the upload -// instance. An error will be returned if the clean up cannot proceed. If the -// resources are already not present, no error will be returned. -func (bw *blobWriter) removeResources(ctx context.Context) error { - dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Name().Name(), - id: bw.id, - }) - - if err != nil { - return err - } - - // Resolve and delete the containing directory, which should include any - // upload related files. - dirPath := path.Dir(dataPath) - if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // already gone! - default: - // This should be uncommon enough such that returning an error - // should be okay. At this point, the upload should be mostly - // complete, but perhaps the backend became unaccessible. - context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} - -func (bw *blobWriter) Reader() (io.ReadCloser, error) { - // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 - try := 1 - for try <= 5 { - _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) - if err == nil { - break - } - switch err.(type) { - case storagedriver.PathNotFoundError: - context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) - time.Sleep(1 * time.Second) - try++ - default: - return nil, err - } - } - - readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) - if err != nil { - return nil, err - } - - return readCloser, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go deleted file mode 100644 index 39166876..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build noresumabledigest - -package storage - -import ( - "github.com/docker/distribution/context" -) - -// resumeHashAt is a noop when resumable digest support is disabled. -func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { - return errResumableDigestNotAvailable -} - -// storeHashState is a noop when resumable digest support is disabled. 
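The blobwriter_nonresumable.go file above and the blobwriter_resumable.go file that follows use Go build tags to swap implementations of the same methods: with the noresumabledigest tag set, both hooks become noops returning errResumableDigestNotAvailable. A minimal two-file sketch of the technique (hypothetical package and function names):

// file: digest_resumable.go (built by default)
// +build !noresumabledigest

package blobutil

// resumableDigestSupported reports whether hash state can be saved and
// restored across requests.
func resumableDigestSupported() bool { return true }

// file: digest_nonresumable.go (built only with: go build -tags noresumabledigest)
// +build noresumabledigest

package blobutil

func resumableDigestSupported() bool { return false }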
-func (bw *blobWriter) storeHashState(ctx context.Context) error { - return errResumableDigestNotAvailable -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go deleted file mode 100644 index fc62bcc4..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go +++ /dev/null @@ -1,178 +0,0 @@ -// +build !noresumabledigest - -package storage - -import ( - "fmt" - "io" - "os" - "path" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/stevvooe/resumable" - - // register resumable hashes with import - _ "github.com/stevvooe/resumable/sha256" - _ "github.com/stevvooe/resumable/sha512" -) - -// resumeDigestAt attempts to restore the state of the internal hash function -// by loading the most recent saved hash state less than or equal to the given -// offset. Any unhashed bytes remaining less than the given offset are hashed -// from the content uploaded so far. -func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - if offset < 0 { - return fmt.Errorf("cannot resume hash at negative offset: %d", offset) - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - - if offset == int64(h.Len()) { - // State of digester is already at the requested offset. - return nil - } - - // List hash states from storage backend. - var hashStateMatch hashStateEntry - hashStates, err := bw.getStoredHashStates(ctx) - if err != nil { - return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) - } - - // Find the highest stored hashState with offset less than or equal to - // the requested offset. - for _, hashState := range hashStates { - if hashState.offset == offset { - hashStateMatch = hashState - break // Found an exact offset match. - } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { - // This offset is closer to the requested offset. - hashStateMatch = hashState - } else if hashState.offset > offset { - // Remove any stored hash state with offsets higher than this one - // as writes to this resumed hasher will make those invalid. This - // is probably okay to skip for now since we don't expect anyone to - // use the API in this way. For that reason, we don't treat an - // an error here as a fatal error, but only log it. - if err := bw.driver.Delete(ctx, hashState.path); err != nil { - logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) - } - } - } - - if hashStateMatch.offset == 0 { - // No need to load any state, just reset the hasher. - h.Reset() - } else { - storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) - if err != nil { - return err - } - - if err = h.Restore(storedState); err != nil { - return err - } - } - - // Mind the gap. - if gapLen := offset - int64(h.Len()); gapLen > 0 { - // Need to read content from the upload to catch up to the desired offset. 
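Before hashing the remaining gap (shown next), resumeDigestAt picks the stored hash state with the largest offset at or below the requested one; an exact offset match wins outright, and states above the offset are deleted as stale. That selection logic, isolated:

package main

import "fmt"

type hashState struct {
	offset int64
	path   string
}

// bestState returns the stored state closest below the requested offset.
// A zero-value result (offset 0, empty path) means no usable state exists
// and the hasher should simply be reset, as in the deleted resumeDigestAt.
func bestState(states []hashState, offset int64) hashState {
	var best hashState
	for _, s := range states {
		if s.offset == offset {
			return s // exact match, nothing left to hash
		}
		if s.offset < offset && s.offset > best.offset {
			best = s
		}
	}
	return best
}

func main() {
	states := []hashState{{1024, "a"}, {4096, "b"}, {8192, "c"}}
	s := bestState(states, 6000)
	fmt.Printf("resume from offset %d (%s), then hash bytes %d..%d\n",
		s.offset, s.path, s.offset, int64(6000))
}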
- fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) - if err != nil { - return err - } - defer fr.Close() - - if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) - } - - if _, err := io.CopyN(h, fr, gapLen); err != nil { - return err - } - } - - return nil -} - -type hashStateEntry struct { - offset int64 - path string -} - -// getStoredHashStates returns a slice of hashStateEntries for this upload. -func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name().String(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - list: true, - }) - - if err != nil { - return nil, err - } - - paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) - if err != nil { - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - return nil, err - } - // Treat PathNotFoundError as no entries. - paths = nil - } - - hashStateEntries := make([]hashStateEntry, 0, len(paths)) - - for _, p := range paths { - pathSuffix := path.Base(p) - // The suffix should be the offset. - offset, err := strconv.ParseInt(pathSuffix, 0, 64) - if err != nil { - logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) - } - - hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) - } - - return hashStateEntries, nil -} - -func (bw *blobWriter) storeHashState(ctx context.Context) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - - uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name().String(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - offset: int64(h.Len()), - }) - - if err != nil { - return err - } - - hashState, err := h.State() - if err != nil { - return err - } - - return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go deleted file mode 100644 index 49c2b5c3..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package memory - -import ( - "testing" - - "github.com/docker/distribution/registry/storage/cache/cachecheck" -) - -// TestInMemoryBlobInfoCache checks the in memory implementation is working -// correctly. -func TestInMemoryBlobInfoCache(t *testing.T) { - cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog.go b/vendor/github.com/docker/distribution/registry/storage/catalog.go deleted file mode 100644 index 481489f2..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/catalog.go +++ /dev/null @@ -1,66 +0,0 @@ -package storage - -import ( - "errors" - "io" - "path" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// ErrFinishedWalk is used when the called walk function no longer wants -// to accept any more values. This is used for pagination when the -// required number of repos have been found. 
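storeHashState persists the serialized hash under a path whose final element is the byte offset, and getStoredHashStates recovers offsets by parsing that suffix back out. A sketch of the round trip (hypothetical path prefix):

package main

import (
	"fmt"
	"path"
	"strconv"
)

// statePath encodes the offset as the last path element, the same layout
// the deleted uploadHashStatePathSpec produces.
func statePath(prefix string, offset int64) string {
	return path.Join(prefix, strconv.FormatInt(offset, 10))
}

// offsetFromPath parses the offset back out of the path suffix, as
// getStoredHashStates does when listing saved states.
func offsetFromPath(p string) (int64, error) {
	return strconv.ParseInt(path.Base(p), 0, 64)
}

func main() {
	p := statePath("/uploads/123/hashstates/sha256", 4096)
	off, err := offsetFromPath(p)
	fmt.Println(p, off, err) // /uploads/123/hashstates/sha256/4096 4096 <nil>
}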
-var ErrFinishedWalk = errors.New("finished walk") - -// Returns a list, or partial list, of repositories in the registry. -// Because it's a quite expensive operation, it should only be used when building up -// an initial set of repositories. -func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) { - var foundRepos []string - - if len(repos) == 0 { - return 0, errors.New("no space in slice") - } - - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return 0, err - } - - err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - - // lop the base path off - repoPath := filePath[len(root)+1:] - - _, file := path.Split(repoPath) - if file == "_layers" { - repoPath = strings.TrimSuffix(repoPath, "/_layers") - if repoPath > last { - foundRepos = append(foundRepos, repoPath) - } - return ErrSkipDir - } else if strings.HasPrefix(file, "_") { - return ErrSkipDir - } - - // if we've filled our array, no need to walk any further - if len(foundRepos) == len(repos) { - return ErrFinishedWalk - } - - return nil - }) - - n = copy(repos, foundRepos) - - // Signal that we have no more entries by setting EOF - if len(foundRepos) <= len(repos) && err != ErrFinishedWalk { - errVal = io.EOF - } - - return n, errVal -} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go deleted file mode 100644 index eb062c5b..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package storage - -import ( - "io" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -type setupEnv struct { - ctx context.Context - driver driver.StorageDriver - expected []string - registry distribution.Namespace -} - -func setupFS(t *testing.T) *setupEnv { - d := inmemory.New() - c := []byte("") - ctx := context.Background() - registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - rootpath, _ := pathFor(repositoriesRootPathSpec{}) - - repos := []string{ - "/foo/a/_layers/1", - "/foo/b/_layers/2", - "/bar/c/_layers/3", - "/bar/d/_layers/4", - "/foo/d/in/_layers/5", - "/an/invalid/repo", - "/bar/d/_layers/ignored/dir/6", - } - - for _, repo := range repos { - if err := d.PutContent(ctx, rootpath+repo, c); err != nil { - t.Fatalf("Unable to put to inmemory fs") - } - } - - expected := []string{ - "bar/c", - "bar/d", - "foo/a", - "foo/b", - "foo/d/in", - } - - return &setupEnv{ - ctx: ctx, - driver: d, - expected: expected, - registry: registry, - } -} - -func TestCatalog(t *testing.T) { - env := setupFS(t) - - p := make([]string, 50) - - numFilled, err := env.registry.Repositories(env.ctx, p, "") - - if !testEq(p, env.expected, numFilled) { - t.Errorf("Expected catalog repos err") - } - - if err != io.EOF { - t.Errorf("Catalog has more values which we aren't expecting") - } -} - -func TestCatalogInParts(t *testing.T) { - env := setupFS(t) - - chunkLen := 2 - p := make([]string, chunkLen) - - numFilled, err := env.registry.Repositories(env.ctx, p, "") - if err == io.EOF || 
numFilled != len(p) { - t.Errorf("Expected more values in catalog") - } - - if !testEq(p, env.expected[0:chunkLen], numFilled) { - t.Errorf("Expected catalog first chunk err") - } - - lastRepo := p[len(p)-1] - numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) - - if err == io.EOF || numFilled != len(p) { - t.Errorf("Expected more values in catalog") - } - - if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { - t.Errorf("Expected catalog second chunk err") - } - - lastRepo = p[len(p)-1] - numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) - - if err != io.EOF { - t.Errorf("Catalog has more values which we aren't expecting") - } - - if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) { - t.Errorf("Expected catalog third chunk err") - } - -} - -func testEq(a, b []string, size int) bool { - for cnt := 0; cnt < size-1; cnt++ { - if a[cnt] != b[cnt] { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/distribution/registry/storage/doc.go b/vendor/github.com/docker/distribution/registry/storage/doc.go deleted file mode 100644 index 387d9234..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package storage contains storage services for use in the registry -// application. It should be considered an internal package, as of Go 1.4. -package storage diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader.go b/vendor/github.com/docker/distribution/registry/storage/filereader.go deleted file mode 100644 index b3a5f520..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/filereader.go +++ /dev/null @@ -1,177 +0,0 @@ -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// TODO(stevvooe): Set an optimal buffer size here. We'll have to -// understand the latency characteristics of the underlying network to -// set this correctly, so we may want to leave it to the driver. For -// out of process drivers, we'll have to optimize this buffer size for -// local communication. -const fileReaderBufferSize = 4 << 20 - -// remoteFileReader provides a read seeker interface to files stored in -// storagedriver. Used to implement part of layer interface and will be used -// to implement read side of LayerUpload. -type fileReader struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - size int64 // size is the total size, must be set. - - // mutable fields - rc io.ReadCloser // remote read closer - brd *bufio.Reader // internal buffered io - offset int64 // offset is the current read offset - err error // terminal error, if set, reader is closed -} - -// newFileReader initializes a file reader for the remote file. The reader -// takes on the size and path that must be determined externally with a stat -// call. The reader operates optimistically, assuming that the file is already -// there. 
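The deleted Repositories method paginates by walking the repository tree, collecting names greater than the last-seen entry, and aborting the walk with the ErrFinishedWalk sentinel once the caller's slice is full; io.EOF then signals an exhausted catalog. A self-contained sketch of that sentinel-error pagination:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errFinished = errors.New("finished walk")

// fillPage copies entries after `last` into page, stopping the walk early
// with a sentinel error once the page is full. io.EOF means no more remain.
func fillPage(all, page []string, last string) (int, error) {
	var found []string
	walk := func(name string) error {
		if name > last {
			found = append(found, name)
		}
		if len(found) == len(page) {
			return errFinished // page full, stop walking
		}
		return nil
	}
	var walkErr error
	for _, name := range all {
		if walkErr = walk(name); walkErr != nil {
			break
		}
	}
	n := copy(page, found)
	if walkErr != errFinished {
		return n, io.EOF
	}
	return n, nil
}

func main() {
	all := []string{"bar/c", "bar/d", "foo/a", "foo/b"}
	page := make([]string, 2)
	n, err := fillPage(all, page, "")
	fmt.Println(page[:n], err) // [bar/c bar/d] <nil>
	n, err = fillPage(all, page, page[n-1])
	fmt.Println(page[:n], err) // [foo/a foo/b] <nil>
	n, err = fillPage(all, page, "foo/b")
	fmt.Println(page[:n], err) // [] EOF
}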
-func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { - return &fileReader{ - ctx: ctx, - driver: driver, - path: path, - size: size, - }, nil -} - -func (fr *fileReader) Read(p []byte) (n int, err error) { - if fr.err != nil { - return 0, fr.err - } - - rd, err := fr.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - fr.offset += int64(n) - - // Simulate io.EOR error if we reach filesize. - if err == nil && fr.offset >= fr.size { - err = io.EOF - } - - return n, err -} - -func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { - if fr.err != nil { - return 0, fr.err - } - - var err error - newOffset := fr.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fr.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - if fr.offset != newOffset { - fr.reset() - } - - // No problems, set the offset. - fr.offset = newOffset - } - - return fr.offset, err -} - -func (fr *fileReader) Close() error { - return fr.closeWithErr(fmt.Errorf("fileReader: closed")) -} - -// reader prepares the current reader at the lrs offset, ensuring its buffered -// and ready to go. -func (fr *fileReader) reader() (io.Reader, error) { - if fr.err != nil { - return nil, fr.err - } - - if fr.rc != nil { - return fr.brd, nil - } - - // If we don't have a reader, open one up. - rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): If the path is not found, we simply return a - // reader that returns io.EOF. However, we do not set fr.rc, - // allowing future attempts at getting a reader to possibly - // succeed if the file turns up later. - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - default: - return nil, err - } - } - - fr.rc = rc - - if fr.brd == nil { - fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) - } else { - fr.brd.Reset(fr.rc) - } - - return fr.brd, nil -} - -// resetReader resets the reader, forcing the read method to open up a new -// connection and rebuild the buffered reader. This should be called when the -// offset and the reader will become out of sync, such as during a seek -// operation. 
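The reader method above is deliberately optimistic: when the backing path is missing it returns an empty reader that yields io.EOF rather than an error, leaving fr.rc unset so a later attempt can succeed if the file appears. A reduced sketch, with a map standing in for the storage driver:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// openOptimistic hands back an empty reader for a missing path instead of
// failing, mirroring the PathNotFoundError branch in fileReader.reader().
func openOptimistic(store map[string][]byte, path string) io.ReadCloser {
	data, ok := store[path]
	if !ok {
		return ioutil.NopCloser(bytes.NewReader([]byte{}))
	}
	return ioutil.NopCloser(bytes.NewReader(data))
}

func main() {
	store := map[string][]byte{}
	rc := openOptimistic(store, "/doesnotexist")
	n, err := rc.Read(make([]byte, 8))
	fmt.Println(n, err) // 0 EOF: missing file reads as empty, not as an error
}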
-func (fr *fileReader) reset() { - if fr.err != nil { - return - } - if fr.rc != nil { - fr.rc.Close() - fr.rc = nil - } -} - -func (fr *fileReader) closeWithErr(err error) error { - if fr.err != nil { - return fr.err - } - - fr.err = err - - // close and release reader chain - if fr.rc != nil { - fr.rc.Close() - } - - fr.rc = nil - fr.brd = nil - - return fr.err -} diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader_test.go b/vendor/github.com/docker/distribution/registry/storage/filereader_test.go deleted file mode 100644 index 774a864b..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/filereader_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/rand" - "io" - mrand "math/rand" - "os" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -func TestSimpleRead(t *testing.T) { - ctx := context.Background() - content := make([]byte, 1<<20) - n, err := rand.Read(content) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - - if n != len(content) { - t.Fatalf("random read didn't fill buffer") - } - - dgst, err := digest.FromReader(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error digesting random content: %v", err) - } - - driver := inmemory.New() - path := "/random" - - if err := driver.PutContent(ctx, path, content); err != nil { - t.Fatalf("error putting patterned content: %v", err) - } - - fr, err := newFileReader(ctx, driver, path, int64(len(content))) - if err != nil { - t.Fatalf("error allocating file reader: %v", err) - } - - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - t.Fatalf("error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify read data") - } -} - -func TestFileReaderSeek(t *testing.T) { - driver := inmemory.New() - pattern := "01234567890ab" // prime length block - repititions := 1024 - path := "/patterned" - content := bytes.Repeat([]byte(pattern), repititions) - ctx := context.Background() - - if err := driver.PutContent(ctx, path, content); err != nil { - t.Fatalf("error putting patterned content: %v", err) - } - - fr, err := newFileReader(ctx, driver, path, int64(len(content))) - - if err != nil { - t.Fatalf("unexpected error creating file reader: %v", err) - } - - // Seek all over the place, in blocks of pattern size and make sure we get - // the right data. 
- for _, repitition := range mrand.Perm(repititions - 1) { - targetOffset := int64(len(pattern) * repitition) - // Seek to a multiple of pattern size and read pattern size bytes - offset, err := fr.Seek(targetOffset, os.SEEK_SET) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if offset != targetOffset { - t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) - } - - p := make([]byte, len(pattern)) - - n, err := fr.Read(p) - if err != nil { - t.Fatalf("error reading pattern: %v", err) - } - - if n != len(pattern) { - t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) - } - - if string(p) != pattern { - t.Fatalf("incorrect read content: %q != %q", p, pattern) - } - - // Check offset - current, err := fr.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("error checking current offset: %v", err) - } - - if current != targetOffset+int64(len(pattern)) { - t.Fatalf("unexpected offset after read: %v", err) - } - } - - start, err := fr.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error seeking to start: %v", err) - } - - if start != 0 { - t.Fatalf("expected to seek to start: %v != 0", start) - } - - end, err := fr.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("error checking current offset: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("expected to seek to end: %v != %v", end, len(content)) - } - - // 4. Seek before start, ensure error. - - // seek before start - before, err := fr.Seek(-1, os.SEEK_SET) - if err == nil { - t.Fatalf("error expected, returned offset=%v", before) - } - - // 5. Seek after end, - after, err := fr.Seek(1, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error expected, returned offset=%v", after) - } - - p := make([]byte, 16) - n, err := fr.Read(p) - - if n != 0 { - t.Fatalf("bytes reads %d != %d", n, 0) - } - - if err != io.EOF { - t.Fatalf("expected io.EOF, got %v", err) - } -} - -// TestFileReaderNonExistentFile ensures the reader behaves as expected with a -// missing or zero-length remote file. While the file may not exist, the -// reader should not error out on creation and should return 0-bytes from the -// read method, with an io.EOF error. -func TestFileReaderNonExistentFile(t *testing.T) { - driver := inmemory.New() - fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) - if err != nil { - t.Fatalf("unexpected error initializing reader: %v", err) - } - - var buf [1024]byte - - n, err := fr.Read(buf[:]) - if n != 0 { - t.Fatalf("non-zero byte read reported: %d != 0", n) - } - - if err != io.EOF { - t.Fatalf("read on missing file should return io.EOF, got %v", err) - } -} - -// TestLayerReadErrors covers the various error return type for different -// conditions that can arise when reading a layer. -func TestFileReaderErrors(t *testing.T) { - // TODO(stevvooe): We need to cover error return types, driven by the - // errors returned via the HTTP API. For now, here is a incomplete list: - // - // 1. Layer Not Found: returned when layer is not found or access is - // denied. - // 2. Layer Unavailable: returned when link references are unresolved, - // but layer is known to the registry. - // 3. Layer Invalid: This may more split into more errors, but should be - // returned when name or tarsum does not reference a valid error. We - // may also need something to communication layer verification errors - // for the inline tarsum check. - // 4. Timeout: timeouts to backend. 
Need to better understand these - // failure cases and how the storage driver propagates these errors - // up the stack. -} diff --git a/vendor/github.com/docker/distribution/registry/storage/filewriter.go b/vendor/github.com/docker/distribution/registry/storage/filewriter.go deleted file mode 100644 index 529fa673..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/filewriter.go +++ /dev/null @@ -1,180 +0,0 @@ -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -const ( - fileWriterBufferSize = 5 << 20 -) - -// fileWriter implements a remote file writer backed by a storage driver. -type fileWriter struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - - // mutable fields - size int64 // size of the file, aka the current end - offset int64 // offset is the current write offset - err error // terminal error, if set, reader is closed -} - -type bufferedFileWriter struct { - fileWriter - bw *bufio.Writer -} - -// fileWriterInterface makes the desired io compliant interface that the -// filewriter should implement. -type fileWriterInterface interface { - io.WriteSeeker - io.ReaderFrom - io.Closer -} - -var _ fileWriterInterface = &fileWriter{} - -// newFileWriter returns a prepared fileWriter for the driver and path. This -// could be considered similar to an "open" call on a regular filesystem. -func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { - fw := fileWriter{ - driver: driver, - path: path, - ctx: ctx, - } - - if fi, err := driver.Stat(ctx, path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - return nil, err - } - } else { - if fi.IsDir() { - return nil, fmt.Errorf("cannot write to a directory") - } - - fw.size = fi.Size() - } - - buffered := bufferedFileWriter{ - fileWriter: fw, - } - buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize) - - return &buffered, nil -} - -// wraps the fileWriter.Write method to buffer small writes -func (bfw *bufferedFileWriter) Write(p []byte) (int, error) { - return bfw.bw.Write(p) -} - -// wraps fileWriter.Close to ensure the buffer is flushed -// before we close the writer. -func (bfw *bufferedFileWriter) Close() (err error) { - if err = bfw.Flush(); err != nil { - return err - } - err = bfw.fileWriter.Close() - return err -} - -// wraps fileWriter.Seek to ensure offset is handled -// correctly in respect to pending data in the buffer -func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) { - if err := bfw.Flush(); err != nil { - return 0, err - } - return bfw.fileWriter.Seek(offset, whence) -} - -// wraps bufio.Writer.Flush to allow intermediate flushes -// of the bufferedFileWriter -func (bfw *bufferedFileWriter) Flush() error { - return bfw.bw.Flush() -} - -// Write writes the buffer p at the current write offset. -func (fw *fileWriter) Write(p []byte) (n int, err error) { - nn, err := fw.ReadFrom(bytes.NewReader(p)) - return int(nn), err -} - -// ReadFrom reads reader r until io.EOF writing the contents at the current -// offset. 
-func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) {
-	if fw.err != nil {
-		return 0, fw.err
-	}
-
-	nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r)
-
-	// We should forward the offset, whether or not there was an error.
-	// Basically, we keep the filewriter in sync with the reader's head. If an
-	// error is encountered, the whole thing should be retried but we proceed
-	// from an expected offset, even if the data didn't make it to the
-	// backend.
-	fw.offset += nn
-
-	if fw.offset > fw.size {
-		fw.size = fw.offset
-	}
-
-	return nn, err
-}
-
-// Seek moves the write position to the requested offset based on the whence
-// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET.
-func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) {
-	if fw.err != nil {
-		return 0, fw.err
-	}
-
-	var err error
-	newOffset := fw.offset
-
-	switch whence {
-	case os.SEEK_CUR:
-		newOffset += int64(offset)
-	case os.SEEK_END:
-		newOffset = fw.size + int64(offset)
-	case os.SEEK_SET:
-		newOffset = int64(offset)
-	}
-
-	if newOffset < 0 {
-		err = fmt.Errorf("cannot seek to negative position")
-	} else {
-		// No problems, set the offset.
-		fw.offset = newOffset
-	}
-
-	return fw.offset, err
-}
-
-// Close closes the fileWriter for writing.
-// Calling it once is valid and correct and it will
-// return a nil error. Calling it subsequent times will
-// detect that fw.err has been set and will return the error.
-func (fw *fileWriter) Close() error {
-	if fw.err != nil {
-		return fw.err
-	}
-
-	fw.err = fmt.Errorf("filewriter@%v: closed", fw.path)
-
-	return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/filewriter_test.go b/vendor/github.com/docker/distribution/registry/storage/filewriter_test.go
deleted file mode 100644
index 858b0327..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/filewriter_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package storage
-
-import (
-	"bytes"
-	"crypto/rand"
-	"io"
-	"os"
-	"testing"
-
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/distribution/registry/storage/driver/inmemory"
-)
-
-// TestSimpleWrite takes the fileWriter through common write operations
-// ensuring data integrity.
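The bufferedFileWriter wrappers deleted above only stay correct because Seek and Close flush the bufio buffer before touching the underlying writer; otherwise the reported offset would trail the bytes logically written. A minimal, self-contained sketch of that flush-before-seek pattern, with illustrative names and a plain os.File standing in for the storage driver:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
)

// seekableBufferedWriter mirrors the pattern above: Write goes through
// bufio, and Seek/Close flush pending bytes first so the underlying
// offset always matches what has logically been written.
type seekableBufferedWriter struct {
	f  *os.File
	bw *bufio.Writer
}

func (w *seekableBufferedWriter) Write(p []byte) (int, error) { return w.bw.Write(p) }

func (w *seekableBufferedWriter) Seek(offset int64, whence int) (int64, error) {
	if err := w.bw.Flush(); err != nil { // pending data must land first
		return 0, err
	}
	return w.f.Seek(offset, whence)
}

func (w *seekableBufferedWriter) Close() error {
	if err := w.bw.Flush(); err != nil {
		return err
	}
	return w.f.Close()
}

func main() {
	f, err := os.CreateTemp("", "bufw")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	w := &seekableBufferedWriter{f: f, bw: bufio.NewWriterSize(f, 5<<20)}
	io.WriteString(w, "hello")
	end, _ := w.Seek(0, io.SeekEnd) // flush happens here, so end == 5
	fmt.Println(end)
	w.Close()
}
```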
-func TestSimpleWrite(t *testing.T) { - content := make([]byte, 1<<20) - n, err := rand.Read(content) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - - if n != len(content) { - t.Fatalf("random read did't fill buffer") - } - - dgst, err := digest.FromReader(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error digesting random content: %v", err) - } - - driver := inmemory.New() - path := "/random" - ctx := context.Background() - - fw, err := newFileWriter(ctx, driver, path) - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - n, err = fw.Write(content) - if err != nil { - t.Fatalf("unexpected error writing content: %v", err) - } - fw.Flush() - - if n != len(content) { - t.Fatalf("unexpected write length: %d != %d", n, len(content)) - } - - fr, err := newFileReader(ctx, driver, path, int64(len(content))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check the seek position is equal to the content length - end, err := fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(content)) - } - - // Double the content - doubled := append(content, content...) - doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) - if err != nil { - t.Fatalf("unexpected error digesting doubled content: %v", err) - } - - nn, err := fw.ReadFrom(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error doubling content: %v", err) - } - - if nn != int64(len(content)) { - t.Fatalf("writeat was short: %d != %d", n, len(content)) - } - - fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check that Write updated the offset. - end, err = fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(doubled)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(doubled)) - } - - // Now, we copy from one path to another, running the data through the - // fileReader to fileWriter, rather than the driver.Move command to ensure - // everything is working correctly. 
- fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - fw, err = newFileWriter(ctx, driver, "/copied") - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - nn, err = io.Copy(fw, fr) - if err != nil { - t.Fatalf("unexpected error copying data: %v", err) - } - - if nn != int64(len(doubled)) { - t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) - } - - fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } -} - -func TestBufferedFileWriter(t *testing.T) { - ctx := context.Background() - writer, err := newFileWriter(ctx, inmemory.New(), "/random") - - if err != nil { - t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) - } - - // write one byte and ensure the offset hasn't been incremented. - // offset will only get incremented when the buffer gets flushed - short := []byte{byte(1)} - - writer.Write(short) - - if writer.offset > 0 { - t.Fatalf("WriteStream called prematurely") - } - - // write enough data to cause the buffer to flush and confirm - // the offset has been incremented - long := make([]byte, fileWriterBufferSize) - _, err = rand.Read(long) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - for i := range long { - long[i] = byte(i) - } - writer.Write(long) - writer.Close() - if writer.offset != (fileWriterBufferSize + 1) { - t.Fatalf("WriteStream not called when buffer capacity reached") - } -} - -func BenchmarkFileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - for i := 0; i < b.N; i++ { - // Start basic fileWriter initialization - fw := fileWriter{ - driver: inmemory.New(), - path: "/random", - } - ctx := context.Background() - if fi, err := fw.driver.Stat(ctx, fw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) - } - } else { - if fi.IsDir() { - b.Fatalf("Cannot write to a directory") - } - - fw.size = fi.Size() - } - - randomBytes := make([]byte, 1<<20) - _, err := rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random data: %v", err) - } - // End basic file writer initialization - - b.StartTimer() - for j := 0; j < 100; j++ { - fw.Write(randomBytes) - } - b.StopTimer() - } -} - -func BenchmarkBufferedFileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - ctx := context.Background() - for i := 0; i < b.N; i++ { - bfw, err := newFileWriter(ctx, inmemory.New(), "/random") - - if err != nil { - b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) - } - - randomBytes := make([]byte, 1<<20) - _, err = rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random data: %v", err) - } - - b.StartTimer() - for j := 0; j < 100; j++ { - bfw.Write(randomBytes) - } - b.StopTimer() - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go 
b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go deleted file mode 100644 index 0c0c622c..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go +++ /dev/null @@ -1,415 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" -) - -// linkPathFunc describes a function that can resolve a link based on the -// repository name and digest. -type linkPathFunc func(name string, dgst digest.Digest) (string, error) - -// linkedBlobStore provides a full BlobService that namespaces the blobs to a -// given repository. Effectively, it manages the links in a given repository -// that grant access to the global blob store. -type linkedBlobStore struct { - *blobStore - registry *registry - blobServer distribution.BlobServer - blobAccessController distribution.BlobDescriptorService - repository distribution.Repository - ctx context.Context // only to be used where context can't come through method args - deleteEnabled bool - resumableDigestEnabled bool - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed an the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes. - linkPathFns []linkPathFunc -} - -var _ distribution.BlobStore = &linkedBlobStore{} - -func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return lbs.blobAccessController.Stat(ctx, dgst) -} - -func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Get(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Open(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return err - } - - if canonical.MediaType != "" { - // Set the repository local content type. - w.Header().Set("Content-Type", canonical.MediaType) - } - - return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) -} - -func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - // Place the data in the blob store first. - desc, err := lbs.blobStore.Put(ctx, mediaType, p) - if err != nil { - context.GetLogger(ctx).Errorf("error putting into main store: %v", err) - return distribution.Descriptor{}, err - } - - if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype if incoming differs from what is - // returned by Put above. 
Note that we should allow updates for a given - // repository. - - return desc, lbs.linkBlob(ctx, desc) -} - -// createOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type createOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - } -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*createOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -// Writer begins a blob write session, returning a handle. -func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") - - var opts createOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - if opts.Mount.ShouldMount { - desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest()) - if err == nil { - // Mount successful, no need to initiate an upload session - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - } - } - - uuid := uuid.Generate().String() - startedAt := time.Now().UTC() - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name().Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name().Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, uuid, path, startedAt) -} - -func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name().Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name().Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, id, path, startedAt) -} - -func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - if !lbs.deleteEnabled { - return distribution.ErrUnsupported - } - - // Ensure the blob is available for deletion - _, err := lbs.blobAccessController.Stat(ctx, dgst) - if err != nil { - return err - } - - err = lbs.blobAccessController.Clear(ctx, dgst) - if err != nil { - return err - } - - return nil -} - -func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo 
reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { - repo, err := lbs.registry.Repository(ctx, sourceRepo) - if err != nil { - return distribution.Descriptor{}, err - } - stat, err := repo.Blobs(ctx).Stat(ctx, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - desc := distribution.Descriptor{ - Size: stat.Size, - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - } - return desc, lbs.linkBlob(ctx, desc) -} - -// newBlobUpload allocates a new upload controller with the given state. -func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { - fw, err := newFileWriter(ctx, lbs.driver, path) - if err != nil { - return nil, err - } - - bw := &blobWriter{ - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.New(), - bufferedFileWriter: *fw, - resumableDigestEnabled: lbs.resumableDigestEnabled, - } - - return bw, nil -} - -// linkBlob links a valid, written blob into the registry under the named -// repository for the upload controller. -func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { - dgsts := append([]digest.Digest{canonical.Digest}, aliases...) - - // TODO(stevvooe): Need to write out mediatype for only canonical hash - // since we don't care about the aliases. They are generally unused except - // for tarsum but those versions don't care about mediatype. - - // Don't make duplicate links. - seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) - - // only use the first link - linkPathFn := lbs.linkPathFns[0] - - for _, dgst := range dgsts { - if _, seen := seenDigests[dgst]; seen { - continue - } - seenDigests[dgst] = struct{}{} - - blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) - if err != nil { - return err - } - - if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { - return err - } - } - - return nil -} - -type linkedBlobStatter struct { - *blobStore - repository distribution.Repository - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed an the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes. - linkPathFns []linkPathFunc -} - -var _ distribution.BlobDescriptorService = &linkedBlobStatter{} - -func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - var ( - resolveErr error - target digest.Digest - ) - - // try the many link path functions until we get success or an error that - // is not PathNotFoundError. - for _, linkPathFn := range lbs.linkPathFns { - var err error - target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) - - if err == nil { - break // success! 
- } - - switch err := err.(type) { - case driver.PathNotFoundError: - resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error - default: - return distribution.Descriptor{}, err - } - } - - if resolveErr != nil { - return distribution.Descriptor{}, resolveErr - } - - if target != dgst { - // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. - context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) - } - - // TODO(stevvooe): Look up repository local mediatype and replace that on - // the returned descriptor. - - return lbs.blobStore.statter.Stat(ctx, target) -} - -func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { - // clear any possible existence of a link described in linkPathFns - for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) - if err != nil { - return err - } - - err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - continue // just ignore this error and continue - default: - return err - } - } - } - - return nil -} - -// resolveTargetWithFunc allows us to read a link to a resource with different -// linkPathFuncs to let us try a few different paths before returning not -// found. -func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) - if err != nil { - return "", err - } - - return lbs.blobStore.readlink(ctx, blobLinkPath) -} - -func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - // The canonical descriptor for a blob is set at the commit phase of upload - return nil -} - -// blobLinkPath provides the path to the blob link, also known as layers. -func blobLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(layerLinkPathSpec{name: name, digest: dgst}) -} - -// manifestRevisionLinkPath provides the path to the manifest revision link. -func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go deleted file mode 100644 index 42027d13..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go +++ /dev/null @@ -1,96 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/manifestlist" -) - -// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. 
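The Stat loop above is the crux of linkedBlobStatter: each linkPathFunc is tried in order, a PathNotFoundError just advances to the next link set, and any other error aborts. A simplified sketch of that fallback resolution, with hypothetical stand-in types in place of the driver and digest machinery:

```go
package main

import (
	"errors"
	"fmt"
)

var errPathNotFound = errors.New("path not found") // stands in for driver.PathNotFoundError

type linkPathFunc func(name, dgst string) (string, error)

// resolve mimics (*linkedBlobStatter).Stat: the first path function whose
// link can be read wins; "not found" moves on, anything else is fatal.
func resolve(name, dgst string, readlink func(string) (string, error), fns []linkPathFunc) (string, error) {
	for _, fn := range fns {
		p, err := fn(name, dgst)
		if err != nil {
			return "", err
		}
		target, err := readlink(p)
		if err == nil {
			return target, nil // success on this link set
		}
		if !errors.Is(err, errPathNotFound) {
			return "", err // real failure, stop immediately
		}
		// not linked here; fall through to the next path function
	}
	return "", errors.New("blob unknown") // no link set resolved the digest
}

func main() {
	links := map[string]string{
		"/v2/repositories/foo/bar/_layers/sha256/abc/link": "sha256:abc",
	}
	readlink := func(p string) (string, error) {
		if v, ok := links[p]; ok {
			return v, nil
		}
		return "", errPathNotFound
	}
	layerLink := func(name, d string) (string, error) {
		return fmt.Sprintf("/v2/repositories/%s/_layers/sha256/%s/link", name, d), nil
	}
	fmt.Println(resolve("foo/bar", "abc", readlink, []linkPathFunc{layerLink}))
}
```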
-type manifestListHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -var _ ManifestHandler = &manifestListHandler{} - -func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") - - var m manifestlist.DeserializedManifestList - if err := json.Unmarshal(content, &m); err != nil { - return nil, err - } - - return &m, nil -} - -func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") - - m, ok := manifestList.(*manifestlist.DeserializedManifestList) - if !ok { - return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) - } - - if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { - return "", err - } - - mt, payload, err := m.Payload() - if err != nil { - return "", err - } - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. As a policy, the registry only tries to -// store valid content, leaving trust policies of that content up to -// consumers. -func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if !skipDependencyVerification { - // This manifest service is different from the blob service - // returned by Blob. It uses a linked blob store to ensure that - // only manifests are accessible. - manifestService, err := ms.repository.Manifests(ctx) - if err != nil { - return err - } - - for _, manifestDescriptor := range mnfst.References() { - exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) - if err != nil && err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - if err != nil || !exists { - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go deleted file mode 100644 index 33c0c351..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go +++ /dev/null @@ -1,134 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" -) - -// A ManifestHandler gets and puts manifests of a particular type. -type ManifestHandler interface { - // Unmarshal unmarshals the manifest from a byte slice. 
-	Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error)
-
-	// Put creates or updates the given manifest returning the manifest digest.
-	Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error)
-}
-
-// SkipLayerVerification allows a manifest to be Put before its
-// layers are on the filesystem.
-func SkipLayerVerification() distribution.ManifestServiceOption {
-	return skipLayerOption{}
-}
-
-type skipLayerOption struct{}
-
-func (o skipLayerOption) Apply(m distribution.ManifestService) error {
-	if ms, ok := m.(*manifestStore); ok {
-		ms.skipDependencyVerification = true
-		return nil
-	}
-	return fmt.Errorf("skip layer verification only valid for manifestStore")
-}
-
-type manifestStore struct {
-	repository *repository
-	blobStore  *linkedBlobStore
-	ctx        context.Context
-
-	skipDependencyVerification bool
-
-	schema1Handler      ManifestHandler
-	schema2Handler      ManifestHandler
-	manifestListHandler ManifestHandler
-}
-
-var _ distribution.ManifestService = &manifestStore{}
-
-func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
-	context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists")
-
-	_, err := ms.blobStore.Stat(ms.ctx, dgst)
-	if err != nil {
-		if err == distribution.ErrBlobUnknown {
-			return false, nil
-		}
-
-		return false, err
-	}
-
-	return true, nil
-}
-
-func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
-	context.GetLogger(ms.ctx).Debug("(*manifestStore).Get")
-
-	// TODO(stevvooe): Need to check descriptor from above to ensure that the
-	// mediatype is as we expect for the manifest store.
-
-	content, err := ms.blobStore.Get(ctx, dgst)
-	if err != nil {
-		if err == distribution.ErrBlobUnknown {
-			return nil, distribution.ErrManifestUnknownRevision{
-				Name:     ms.repository.Name().Name(),
-				Revision: dgst,
-			}
-		}
-
-		return nil, err
-	}
-
-	var versioned manifest.Versioned
-	if err = json.Unmarshal(content, &versioned); err != nil {
-		return nil, err
-	}
-
-	switch versioned.SchemaVersion {
-	case 1:
-		return ms.schema1Handler.Unmarshal(ctx, dgst, content)
-	case 2:
-		// This can be an image manifest or a manifest list
-		switch versioned.MediaType {
-		case schema2.MediaTypeManifest:
-			return ms.schema2Handler.Unmarshal(ctx, dgst, content)
-		case manifestlist.MediaTypeManifestList:
-			return ms.manifestListHandler.Unmarshal(ctx, dgst, content)
-		default:
-			return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)}
-		}
-	}
-
-	return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion)
-}
-
-func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
-	context.GetLogger(ms.ctx).Debug("(*manifestStore).Put")
-
-	switch manifest.(type) {
-	case *schema1.SignedManifest:
-		return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification)
-	case *schema2.DeserializedManifest:
-		return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification)
-	case *manifestlist.DeserializedManifestList:
-		return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification)
-	}
-
-	return "", fmt.Errorf("unrecognized manifest type %T", manifest)
-}
-
-// Delete removes the revision of the specified manifest.
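The two-level switch in (*manifestStore).Get above, which peeks at the schemaVersion/mediaType envelope before choosing a handler, is worth seeing in isolation. A runnable sketch with stand-in handler names; the media type constants match those used by the schema2 and manifestlist packages:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// versioned mirrors manifest.Versioned: just enough of the envelope to route.
type versioned struct {
	SchemaVersion int    `json:"schemaVersion"`
	MediaType     string `json:"mediaType,omitempty"`
}

const (
	mediaTypeManifest     = "application/vnd.docker.distribution.manifest.v2+json"
	mediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
)

// route decodes the envelope first, then picks the concrete decoder,
// following the same two-level switch used by the manifest store.
func route(content []byte) (string, error) {
	var v versioned
	if err := json.Unmarshal(content, &v); err != nil {
		return "", err
	}
	switch v.SchemaVersion {
	case 1:
		return "schema1", nil
	case 2:
		switch v.MediaType {
		case mediaTypeManifest:
			return "schema2", nil
		case mediaTypeManifestList:
			return "manifestlist", nil
		default:
			return "", fmt.Errorf("unrecognized manifest content type %s", v.MediaType)
		}
	}
	return "", fmt.Errorf("unrecognized manifest schema version %d", v.SchemaVersion)
}

func main() {
	out, err := route([]byte(`{"schemaVersion":2,"mediaType":"` + mediaTypeManifest + `"}`))
	fmt.Println(out, err) // schema2 <nil>
}
```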
-func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") - return ms.blobStore.Delete(ctx, dgst) -} - -func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - return 0, distribution.ErrUnsupported -} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go deleted file mode 100644 index 7885c466..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go +++ /dev/null @@ -1,401 +0,0 @@ -package storage - -import ( - "bytes" - "io" - "reflect" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" -) - -type manifestStoreTestEnv struct { - ctx context.Context - driver driver.StorageDriver - registry distribution.Namespace - repository distribution.Repository - name reference.Named - tag string -} - -func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string) *manifestStoreTestEnv { - ctx := context.Background() - driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider( - memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - - repo, err := registry.Repository(ctx, name) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - - return &manifestStoreTestEnv{ - ctx: ctx, - driver: driver, - registry: registry, - repository: repo, - name: name, - tag: tag, - } -} - -func TestManifestStorage(t *testing.T) { - repoName, _ := reference.ParseNamed("foo/bar") - env := newManifestStoreTestEnv(t, repoName, "thetag") - ctx := context.Background() - ms, err := env.repository.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - m := schema1.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: env.name.Name(), - Tag: env.tag, - } - - // Build up some test layers and add them to the manifest, saving the - // readseekers for upload later. 
- testLayers := map[digest.Digest]io.ReadSeeker{} - for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("unexpected error generating test layer file") - } - dgst := digest.Digest(ds) - - testLayers[digest.Digest(dgst)] = rs - m.FSLayers = append(m.FSLayers, schema1.FSLayer{ - BlobSum: dgst, - }) - m.History = append(m.History, schema1.History{ - V1Compatibility: "", - }) - - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - sm, merr := schema1.Sign(&m, pk) - if merr != nil { - t.Fatalf("error signing manifest: %v", err) - } - - _, err = ms.Put(ctx, sm) - if err == nil { - t.Fatalf("expected errors putting manifest with full verification") - } - - switch err := err.(type) { - case distribution.ErrManifestVerification: - if len(err) != 2 { - t.Fatalf("expected 2 verification errors: %#v", err) - } - - for _, err := range err { - if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { - t.Fatalf("unexpected error type: %v", err) - } - } - default: - t.Fatalf("unexpected error verifying manifest: %v", err) - } - - // Now, upload the layers that were missing! - for dgst, rs := range testLayers { - wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) - if err != nil { - t.Fatalf("unexpected error creating test upload: %v", err) - } - - if _, err := io.Copy(wr, rs); err != nil { - t.Fatalf("unexpected error copying to upload: %v", err) - } - - if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { - t.Fatalf("unexpected error finishing upload: %v", err) - } - } - - var manifestDigest digest.Digest - if manifestDigest, err = ms.Put(ctx, sm); err != nil { - t.Fatalf("unexpected error putting manifest: %v", err) - } - - exists, err := ms.Exists(ctx, manifestDigest) - if err != nil { - t.Fatalf("unexpected error checking manifest existence: %#v", err) - } - - if !exists { - t.Fatalf("manifest should exist") - } - - fromStore, err := ms.Get(ctx, manifestDigest) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - - fetchedManifest, ok := fromStore.(*schema1.SignedManifest) - if !ok { - t.Fatalf("unexpected manifest type from signedstore") - } - - if !reflect.DeepEqual(fetchedManifest, sm) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) - } - - _, pl, err := fetchedManifest.Payload() - if err != nil { - t.Fatalf("error getting payload %#v", err) - } - - fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") - if err != nil { - t.Fatalf("unexpected error parsing jws: %v", err) - } - - payload, err := fetchedJWS.Payload() - if err != nil { - t.Fatalf("unexpected error extracting payload: %v", err) - } - - // Now that we have a payload, take a moment to check that the manifest is - // return by the payload digest. 
- - dgst := digest.FromBytes(payload) - exists, err = ms.Exists(ctx, dgst) - if err != nil { - t.Fatalf("error checking manifest existence by digest: %v", err) - } - - if !exists { - t.Fatalf("manifest %s should exist", dgst) - } - - fetchedByDigest, err := ms.Get(ctx, dgst) - if err != nil { - t.Fatalf("unexpected error fetching manifest by digest: %v", err) - } - - if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) - } - - sigs, err := fetchedJWS.Signatures() - if err != nil { - t.Fatalf("unable to extract signatures: %v", err) - } - - if len(sigs) != 1 { - t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) - } - - // Now, push the same manifest with a different key - pk2, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - sm2, err := schema1.Sign(&m, pk2) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - _, pl, err = sm2.Payload() - if err != nil { - t.Fatalf("error getting payload %#v", err) - } - - jws2, err := libtrust.ParsePrettySignature(pl, "signatures") - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } - - sigs2, err := jws2.Signatures() - if err != nil { - t.Fatalf("unable to extract signatures: %v", err) - } - - if len(sigs2) != 1 { - t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) - } - - if manifestDigest, err = ms.Put(ctx, sm2); err != nil { - t.Fatalf("unexpected error putting manifest: %v", err) - } - - fromStore, err = ms.Get(ctx, manifestDigest) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - - fetched, ok := fromStore.(*schema1.SignedManifest) - if !ok { - t.Fatalf("unexpected type from signed manifeststore : %T", fetched) - } - - if _, err := schema1.Verify(fetched); err != nil { - t.Fatalf("unexpected error verifying manifest: %v", err) - } - - // Assemble our payload and two signatures to get what we expect! 
- expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0]) - if err != nil { - t.Fatalf("unexpected error merging jws: %v", err) - } - - expectedSigs, err := expectedJWS.Signatures() - if err != nil { - t.Fatalf("unexpected error getting expected signatures: %v", err) - } - - _, pl, err = fetched.Payload() - if err != nil { - t.Fatalf("error getting payload %#v", err) - } - - receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") - if err != nil { - t.Fatalf("unexpected error parsing jws: %v", err) - } - - receivedPayload, err := receivedJWS.Payload() - if err != nil { - t.Fatalf("unexpected error extracting received payload: %v", err) - } - - if !bytes.Equal(receivedPayload, payload) { - t.Fatalf("payloads are not equal") - } - - receivedSigs, err := receivedJWS.Signatures() - if err != nil { - t.Fatalf("error getting signatures: %v", err) - } - - for i, sig := range receivedSigs { - if !bytes.Equal(sig, expectedSigs[i]) { - t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) - } - } - - // Test deleting manifests - err = ms.Delete(ctx, dgst) - if err != nil { - t.Fatalf("unexpected an error deleting manifest by digest: %v", err) - } - - exists, err = ms.Exists(ctx, dgst) - if err != nil { - t.Fatalf("Error querying manifest existence") - } - if exists { - t.Errorf("Deleted manifest should not exist") - } - - deletedManifest, err := ms.Get(ctx, dgst) - if err == nil { - t.Errorf("Unexpected success getting deleted manifest") - } - switch err.(type) { - case distribution.ErrManifestUnknownRevision: - break - default: - t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) - } - - if deletedManifest != nil { - t.Errorf("Deleted manifest get returned non-nil") - } - - // Re-upload should restore manifest to a good state - _, err = ms.Put(ctx, sm) - if err != nil { - t.Errorf("Error re-uploading deleted manifest") - } - - exists, err = ms.Exists(ctx, dgst) - if err != nil { - t.Fatalf("Error querying manifest existence") - } - if !exists { - t.Errorf("Restored manifest should exist") - } - - deletedManifest, err = ms.Get(ctx, dgst) - if err != nil { - t.Errorf("Unexpected error getting manifest") - } - if deletedManifest == nil { - t.Errorf("Deleted manifest get returned non-nil") - } - - r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repo, err := r.Repository(ctx, env.name) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ms, err = repo.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - err = ms.Delete(ctx, dgst) - if err == nil { - t.Errorf("Unexpected success deleting while disabled") - } -} - -// TestLinkPathFuncs ensures that the link path functions behavior are locked -// down and implemented as expected. 
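TestLinkPathFuncs below pins down the _layers and _manifests link layouts. The mapping itself is mostly path.Join over digest components; here is an illustrative re-derivation of the blob link path for the test's own repo/digest pair (a simplified sketch, not the vendored pathFor):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// blobLinkPath splits "<algorithm>:<hex>" and anchors the link under the
// repository's _layers tree, matching the expected value in the test table.
func blobLinkPath(name, dgst string) (string, error) {
	parts := strings.SplitN(dgst, ":", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("invalid digest: %q", dgst)
	}
	return path.Join("/docker/registry/v2/repositories", name,
		"_layers", parts[0], parts[1], "link"), nil
}

func main() {
	p, _ := blobLinkPath("foo/bar",
		"sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	fmt.Println(p)
	// /docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf.../link
}
```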
-func TestLinkPathFuncs(t *testing.T) { - for _, testcase := range []struct { - repo string - digest digest.Digest - linkPathFn linkPathFunc - expected string - }{ - { - repo: "foo/bar", - digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - linkPathFn: blobLinkPath, - expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", - }, - { - repo: "foo/bar", - digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - linkPathFn: manifestRevisionLinkPath, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", - }, - } { - p, err := testcase.linkPathFn(testcase.repo, testcase.digest) - if err != nil { - t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err) - } - - if p != testcase.expected { - t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) - } - } - -} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go deleted file mode 100644 index 4d2d48c1..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/paths.go +++ /dev/null @@ -1,493 +0,0 @@ -package storage - -import ( - "fmt" - "path" - "strings" - - "github.com/docker/distribution/digest" -) - -const ( - storagePathVersion = "v2" // fixed storage layout version - storagePathRoot = "/docker/registry/" // all driver paths have a prefix - - // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we though - // storage path root would configurable for all drivers through this - // package. In reality, we've found it simpler to do this on a per driver - // basis. -) - -// pathFor maps paths based on "object names" and their ids. The "object -// names" mapped by are internal to the storage system. -// -// The path layout in the storage backend is roughly as follows: -// -// /v2 -// -> repositories/ -// ->/ -// -> _manifests/ -// revisions -// -> -// -> link -// -> signatures -// //link -// tags/ -// -> current/link -// -> index -// -> //link -// -> _layers/ -// -// -> _uploads/ -// data -// startedat -// hashstates// -// -> blob/ -// -// -// The storage backend layout is broken up into a content-addressable blob -// store and repositories. The content-addressable blob store holds most data -// throughout the backend, keyed by algorithm and digests of the underlying -// content. Access to the blob store is controled through links from the -// repository to blobstore. -// -// A repository is made up of layers, manifests and tags. The layers component -// is just a directory of layers which are "linked" into a repository. A layer -// can only be accessed through a qualified repository name if it is linked in -// the repository. Uploads of layers are managed in the uploads directory, -// which is key by upload id. When all data for an upload is received, the -// data is moved into the blob store and the upload directory is deleted. -// Abandoned uploads can be garbage collected by reading the startedat file -// and removing uploads that have been active for longer than a certain time. -// -// The third component of the repository directory is the manifests store, -// which is made up of a revision store and tag store. Manifests are stored in -// the blob store and linked into the revision store. 
Signatures are separated -// from the manifest payload data and linked into the blob store, as well. -// While the registry can save all revisions of a manifest, no relationship is -// implied as to the ordering of changes to a manifest. The tag store provides -// support for name, tag lookups of manifests, using "current/link" under a -// named tag directory. An index is maintained to support deletions of all -// revisions of a given manifest tag. -// -// We cover the path formats implemented by this path mapper below. -// -// Manifests: -// -// manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// -// manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link -// manifestSignaturesPathSpec: /v2/repositories//_manifests/revisions///signatures/ -// manifestSignatureLinkPathSpec: /v2/repositories//_manifests/revisions///signatures///link -// -// Tags: -// -// manifestTagsPathSpec: /v2/repositories//_manifests/tags/ -// manifestTagPathSpec: /v2/repositories//_manifests/tags// -// manifestTagCurrentPathSpec: /v2/repositories//_manifests/tags//current/link -// manifestTagIndexPathSpec: /v2/repositories//_manifests/tags//index/ -// manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// -// manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link -// -// Blobs: -// -// layerLinkPathSpec: /v2/repositories//_layers///link -// -// Uploads: -// -// uploadDataPathSpec: /v2/repositories//_uploads//data -// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat -// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// -// -// Blob Store: -// -// blobPathSpec: /v2/blobs/// -// blobDataPathSpec: /v2/blobs////data -// blobMediaTypePathSpec: /v2/blobs////data -// -// For more information on the semantic meaning of each path and their -// contents, please see the path spec documentation. -func pathFor(spec pathSpec) (string, error) { - - // Switch on the path object type and return the appropriate path. At - // first glance, one may wonder why we don't use an interface to - // accomplish this. By keep the formatting separate from the pathSpec, we - // keep separate the path generation componentized. These specs could be - // passed to a completely different mapper implementation and generate a - // different set of paths. - // - // For example, imagine migrating from one backend to the other: one could - // build a filesystem walker that converts a string path in one version, - // to an intermediate path object, than can be consumed and mapped by the - // other version. 
- - rootPrefix := []string{storagePathRoot, storagePathVersion} - repoPrefix := append(rootPrefix, "repositories") - - switch v := spec.(type) { - - case manifestRevisionPathSpec: - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil - case manifestRevisionLinkPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestSignaturesPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "signatures"), nil - case manifestSignatureLinkPathSpec: - root, err := pathFor(manifestSignaturesPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - signatureComponents, err := digestPathComponents(v.signature, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil - case manifestTagsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil - case manifestTagPathSpec: - root, err := pathFor(manifestTagsPathSpec{ - name: v.name, - }) - - if err != nil { - return "", err - } - - return path.Join(root, v.tag), nil - case manifestTagCurrentPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "current", "link"), nil - case manifestTagIndexPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "index"), nil - case manifestTagIndexEntryLinkPathSpec: - root, err := pathFor(manifestTagIndexEntryPathSpec{ - name: v.name, - tag: v.tag, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestTagIndexEntryPathSpec: - root, err := pathFor(manifestTagIndexPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(components...)), nil - case layerLinkPathSpec: - components, err := digestPathComponents(v.digest, false) - if err != nil { - return "", err - } - - // TODO(stevvooe): Right now, all blobs are linked under "_layers". If - // we have future migrations, we may want to rename this to "_blobs". - // A migration strategy would simply leave existing items in place and - // write the new paths, commit a file then delete the old files. 
-
-		blobLinkPathComponents := append(repoPrefix, v.name, "_layers")
-
-		return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil
-	case blobDataPathSpec:
-		components, err := digestPathComponents(v.digest, true)
-		if err != nil {
-			return "", err
-		}
-
-		components = append(components, "data")
-		blobPathPrefix := append(rootPrefix, "blobs")
-		return path.Join(append(blobPathPrefix, components...)...), nil
-
-	case uploadDataPathSpec:
-		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil
-	case uploadStartedAtPathSpec:
-		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil
-	case uploadHashStatePathSpec:
-		offset := fmt.Sprintf("%d", v.offset)
-		if v.list {
-			offset = "" // Limit to the prefix for listing offsets.
-		}
-		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
-	case repositoriesRootPathSpec:
-		return path.Join(repoPrefix...), nil
-	default:
-		// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
-		return "", fmt.Errorf("unknown path spec: %#v", v)
-	}
-}
-
-// pathSpec is a type to mark structs as path specs. There is no
-// implementation because we'd like to keep the specs and the mappers
-// decoupled.
-type pathSpec interface {
-	pathSpec()
-}
-
-// manifestRevisionPathSpec describes the components of the directory path for
-// a manifest revision.
-type manifestRevisionPathSpec struct {
-	name     string
-	revision digest.Digest
-}
-
-func (manifestRevisionPathSpec) pathSpec() {}
-
-// manifestRevisionLinkPathSpec describes the path components required to look
-// up the data link for a revision of a manifest. If this file is not present,
-// the manifest blob is not available in the given repo. The contents of this
-// file should just be the digest.
-type manifestRevisionLinkPathSpec struct {
-	name     string
-	revision digest.Digest
-}
-
-func (manifestRevisionLinkPathSpec) pathSpec() {}
-
-// manifestSignaturesPathSpec describes the path components for the directory
-// containing all the signatures for the target blob. Entries are named with
-// the underlying key id.
-type manifestSignaturesPathSpec struct {
-	name     string
-	revision digest.Digest
-}
-
-func (manifestSignaturesPathSpec) pathSpec() {}
-
-// manifestSignatureLinkPathSpec describes the path components used to look up
-// a signature file by the hash of its blob.
-type manifestSignatureLinkPathSpec struct {
-	name      string
-	revision  digest.Digest
-	signature digest.Digest
-}
-
-func (manifestSignatureLinkPathSpec) pathSpec() {}
-
-// manifestTagsPathSpec describes the path elements required to point to the
-// manifest tags directory.
-type manifestTagsPathSpec struct {
-	name string
-}
-
-func (manifestTagsPathSpec) pathSpec() {}
-
-// manifestTagPathSpec describes the path elements required to point to the
-// manifest tag link files under a repository. These contain a blob id that
-// can be used to look up the data and signatures.
-type manifestTagPathSpec struct {
-	name string
-	tag  string
-}
-
-func (manifestTagPathSpec) pathSpec() {}
-
-// manifestTagCurrentPathSpec describes the link to the current revision for a
-// given tag.
-type manifestTagCurrentPathSpec struct {
-	name string
-	tag  string
-}
-
-func (manifestTagCurrentPathSpec) pathSpec() {}
-
-// manifestTagIndexPathSpec describes the link to the index of revisions
-// with the given tag.
-type manifestTagIndexPathSpec struct {
-	name string
-	tag  string
-}
-
-func (manifestTagIndexPathSpec) pathSpec() {}
-
-// manifestTagIndexEntryPathSpec contains the entries of the index by revision.
-type manifestTagIndexEntryPathSpec struct {
-	name     string
-	tag      string
-	revision digest.Digest
-}
-
-func (manifestTagIndexEntryPathSpec) pathSpec() {}
-
-// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a
-// manifest with a given tag within the index.
-type manifestTagIndexEntryLinkPathSpec struct {
-	name     string
-	tag      string
-	revision digest.Digest
-}
-
-func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}
-
-// blobLinkPathSpec specifies a path for a blob link, which is a file with a
-// blob id. The blob link will contain a content addressable blob id reference
-// into the blob store. The format of the contents is as follows:
-//
-//	<algorithm>:<hex digest of layer data>
-//
-// The following example of the file contents is more illustrative:
-//
-//	sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
-//
-// This indicates that there is a blob with the id/digest, calculated via
-// sha256 that can be fetched from the blob store.
-type layerLinkPathSpec struct {
-	name   string
-	digest digest.Digest
-}
-
-func (layerLinkPathSpec) pathSpec() {}
-
-// blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Paths should be "safe" before getting this far due to strict digest
-// requirements but we can add further path conversion here, if needed.
-var blobAlgorithmReplacer = strings.NewReplacer(
-	"+", "/",
-	".", "/",
-	";", "/",
-)
-
-// // blobPathSpec contains the path for the registry global blob store.
-// type blobPathSpec struct {
-// 	digest digest.Digest
-// }
-
-// func (blobPathSpec) pathSpec() {}
-
-// blobDataPathSpec contains the path for the registry global blob store. For
-// now, this contains layer data, exclusively.
-type blobDataPathSpec struct {
-	digest digest.Digest
-}
-
-func (blobDataPathSpec) pathSpec() {}
-
-// uploadDataPathSpec defines the path parameters of the data file for
-// uploads.
-type uploadDataPathSpec struct {
-	name string
-	id   string
-}
-
-func (uploadDataPathSpec) pathSpec() {}
-
-// uploadStartedAtPathSpec defines the path parameters for the file that stores
-// the start time of an upload. If it is missing, the upload is considered
-// unknown. Admittedly, the presence of this file is an ugly hack to make sure
-// we have a way to clean up old or stalled uploads that doesn't rely on driver
-// FileInfo behavior. If we come up with a more clever way to do this, we
-// should remove this file immediately and rely on the startedAt field from
-// the client to enforce time out policies.
-type uploadStartedAtPathSpec struct {
-	name string
-	id   string
-}
-
-func (uploadStartedAtPathSpec) pathSpec() {}
-
-// uploadHashStatePathSpec defines the path parameters for the file that stores
-// the hash function state of an upload at a specific byte offset. If `list` is
-// set, then the path mapper will generate a list prefix for all hash state
-// offsets for the upload identified by the name, id, and alg.
-type uploadHashStatePathSpec struct {
-	name   string
-	id     string
-	alg    digest.Algorithm
-	offset int64
-	list   bool
-}
-
-func (uploadHashStatePathSpec) pathSpec() {}
-
-// repositoriesRootPathSpec returns the root of repositories
-type repositoriesRootPathSpec struct {
-}
-
-func (repositoriesRootPathSpec) pathSpec() {}
-
-// digestPathComponents provides a consistent path breakdown for a given
-// digest.
For a generic digest, it will be as follows: -// -// / -// -// If multilevel is true, the first two bytes of the digest will separate -// groups of digest folder. It will be as follows: -// -// // -// -func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { - if err := dgst.Validate(); err != nil { - return nil, err - } - - algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) - hex := dgst.Hex() - prefix := []string{algorithm} - - var suffix []string - - if multilevel { - suffix = append(suffix, hex[:2]) - } - - suffix = append(suffix, hex) - - return append(prefix, suffix...), nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths_test.go b/vendor/github.com/docker/distribution/registry/storage/paths_test.go deleted file mode 100644 index 2ad78e9d..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/paths_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package storage - -import ( - "testing" -) - -func TestPathMapper(t *testing.T) { - for _, testcase := range []struct { - spec pathSpec - expected string - err error - }{ - { - spec: manifestRevisionPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - { - spec: manifestRevisionLinkPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", - }, - { - spec: manifestSignatureLinkPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - signature: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", - }, - { - spec: manifestSignaturesPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures", - }, - { - spec: manifestTagsPathSpec{ - name: "foo/bar", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags", - }, - { - spec: manifestTagPathSpec{ - name: "foo/bar", - tag: "thetag", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag", - }, - { - spec: manifestTagCurrentPathSpec{ - name: "foo/bar", - tag: "thetag", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link", - }, - { - spec: manifestTagIndexPathSpec{ - name: "foo/bar", - tag: "thetag", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index", - }, - { - spec: manifestTagIndexEntryPathSpec{ - name: "foo/bar", - tag: "thetag", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - { - spec: manifestTagIndexEntryLinkPathSpec{ - 
name: "foo/bar", - tag: "thetag", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", - }, - - { - spec: uploadDataPathSpec{ - name: "foo/bar", - id: "asdf-asdf-asdf-adsf", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", - }, - { - spec: uploadStartedAtPathSpec{ - name: "foo/bar", - id: "asdf-asdf-asdf-adsf", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", - }, - } { - p, err := pathFor(testcase.spec) - if err != nil { - t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) - } - - if p != testcase.expected { - t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) - } - } - - // Add a few test cases to ensure we cover some errors - - // Specify a path that requires a revision and get a digest validation error. - badpath, err := pathFor(manifestSignaturesPathSpec{ - name: "foo/bar", - }) - - if err == nil { - t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) - } - -} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go deleted file mode 100644 index 7576b189..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go +++ /dev/null @@ -1,139 +0,0 @@ -package storage - -import ( - "path" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" -) - -// uploadData stored the location of temporary files created during a layer upload -// along with the date the upload was started -type uploadData struct { - containingDir string - startedAt time.Time -} - -func newUploadData() uploadData { - return uploadData{ - containingDir: "", - // default to far in future to protect against missing startedat - startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), - } -} - -// PurgeUploads deletes files from the upload directory -// created before olderThan. The list of files deleted and errors -// encountered are returned -func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { - log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) - uploadData, errors := getOutstandingUploads(ctx, driver) - var deleted []string - for _, uploadData := range uploadData { - if uploadData.startedAt.Before(olderThan) { - var err error - log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", - uploadData.containingDir, uploadData.startedAt, olderThan) - if actuallyDelete { - err = driver.Delete(ctx, uploadData.containingDir) - } - if err == nil { - deleted = append(deleted, uploadData.containingDir) - } else { - errors = append(errors, err) - } - } - } - - log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) - return deleted, errors -} - -// getOutstandingUploads walks the upload directory, collecting files -// which could be eligible for deletion. 
The only reliable way to -// classify the age of a file is with the date stored in the startedAt -// file, so gather files by UUID with a date from startedAt. -func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { - var errors []error - uploads := make(map[string]uploadData, 0) - - inUploadDir := false - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return uploads, append(errors, err) - } - - err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { - filePath := fileInfo.Path() - _, file := path.Split(filePath) - if file[0] == '_' { - // Reserved directory - inUploadDir = (file == "_uploads") - - if fileInfo.IsDir() && !inUploadDir { - return ErrSkipDir - } - - } - - uuid, isContainingDir := uUIDFromPath(filePath) - if uuid == "" { - // Cannot reliably delete - return nil - } - ud, ok := uploads[uuid] - if !ok { - ud = newUploadData() - } - if isContainingDir { - ud.containingDir = filePath - } - if file == "startedat" { - if t, err := readStartedAtFile(driver, filePath); err == nil { - ud.startedAt = t - } else { - errors = pushError(errors, filePath, err) - } - - } - - uploads[uuid] = ud - return nil - }) - - if err != nil { - errors = pushError(errors, root, err) - } - return uploads, errors -} - -// uUIDFromPath extracts the upload UUID from a given path -// If the UUID is the last path component, this is the containing -// directory for all upload files -func uUIDFromPath(path string) (string, bool) { - components := strings.Split(path, "/") - for i := len(components) - 1; i >= 0; i-- { - if u, err := uuid.Parse(components[i]); err == nil { - return u.String(), i == len(components)-1 - } - } - return "", false -} - -// readStartedAtFile reads the date from an upload's startedAtFile -func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { - // todo:(richardscothern) - pass in a context - startedAtBytes, err := driver.GetContent(context.Background(), path) - if err != nil { - return time.Now(), err - } - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return time.Now(), err - } - return startedAt, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go deleted file mode 100644 index 3b70f723..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package storage - -import ( - "path" - "strings" - "testing" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/uuid" -) - -func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { - d := inmemory.New() - ctx := context.Background() - for i := 0; i < numUploads; i++ { - addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt) - } - return d, ctx -} - -func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { - dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID}) - if err != nil { - t.Fatalf("Unable to resolve path") - } - if err := d.PutContent(ctx, dataPath, []byte("")); err != nil { - t.Fatalf("Unable to write data file") - } - - startedAtPath, err := 
pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID}) - if err != nil { - t.Fatalf("Unable to resolve path") - } - - if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - t.Fatalf("Unable to write startedAt file") - } - -} - -func TestPurgeGather(t *testing.T) { - uploadCount := 5 - fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now()) - uploadData, errs := getOutstandingUploads(ctx, fs) - if len(errs) != 0 { - t.Errorf("Unexepected errors: %q", errs) - } - if len(uploadData) != uploadCount { - t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData)) - } -} - -func TestPurgeNone(t *testing.T) { - fs, ctx := testUploadFS(t, 10, "test-repo", time.Now()) - oneHourAgo := time.Now().Add(-1 * time.Hour) - deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true) - if len(errs) != 0 { - t.Error("Unexpected errors", errs) - } - if len(deleted) != 0 { - t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) - } -} - -func TestPurgeAll(t *testing.T) { - uploadCount := 10 - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) - - // Ensure > 1 repos are purged - addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo) - uploadCount++ - - deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) - if len(errs) != 0 { - t.Error("Unexpected errors:", errs) - } - fileCount := uploadCount - if len(deleted) != fileCount { - t.Errorf("Unexpectedly deleted file count %d != %d", - len(deleted), fileCount) - } -} - -func TestPurgeSome(t *testing.T) { - oldUploadCount := 5 - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) - - newUploadCount := 4 - - for i := 0; i < newUploadCount; i++ { - addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour)) - } - - deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) - if len(errs) != 0 { - t.Error("Unexpected errors:", errs) - } - if len(deleted) != oldUploadCount { - t.Errorf("Unexpectedly deleted file count %d != %d", - len(deleted), oldUploadCount) - } -} - -func TestPurgeOnlyUploads(t *testing.T) { - oldUploadCount := 5 - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) - - // Create a directory tree outside _uploads and ensure - // these files aren't deleted. 
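These tests drive PurgeUploads end to end. Outside of tests, the actuallyDelete flag gives a dry run: when false, eligible directories are reported but nothing is removed. A usage sketch, assuming the same vendored context and inmemory driver packages these tests import (the 24-hour retention window is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	d := inmemory.New()
	cutoff := time.Now().Add(-24 * time.Hour) // illustrative retention window

	// Dry run: list upload directories started before the cutoff.
	candidates, errs := storage.PurgeUploads(ctx, d, cutoff, false)
	fmt.Printf("would delete %d dirs (%d errors)\n", len(candidates), len(errs))

	// Real pass: delete them.
	deleted, errs := storage.PurgeUploads(ctx, d, cutoff, true)
	fmt.Printf("deleted %d dirs (%d errors)\n", len(deleted), len(errs))
}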
- dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) - if err != nil { - t.Fatalf(err.Error()) - } - nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) - if strings.Index(nonUploadPath, "_upload") != -1 { - t.Fatalf("Non-upload path not created correctly") - } - - nonUploadFile := path.Join(nonUploadPath, "file") - if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { - t.Fatalf("Unable to write data file") - } - - deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) - if len(errs) != 0 { - t.Error("Unexpected errors", errs) - } - for _, file := range deleted { - if strings.Index(file, "_upload") == -1 { - t.Errorf("Non-upload file deleted") - } - } -} - -func TestPurgeMissingStartedAt(t *testing.T) { - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) - - err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - _, file := path.Split(filePath) - - if file == "startedat" { - if err := fs.Delete(ctx, filePath); err != nil { - t.Fatalf("Unable to delete startedat file: %s", filePath) - } - } - return nil - }) - if err != nil { - t.Fatalf("Unexpected error during Walk: %s ", err.Error()) - } - deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) - if len(errs) > 0 { - t.Errorf("Unexpected errors") - } - if len(deleted) > 0 { - t.Errorf("Files unexpectedly deleted: %s", deleted) - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/registry.go b/vendor/github.com/docker/distribution/registry/storage/registry.go deleted file mode 100644 index be570cbc..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/registry.go +++ /dev/null @@ -1,242 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// registry is the top-level implementation of Registry for use in the storage -// package. All instances should descend from this object. -type registry struct { - blobStore *blobStore - blobServer *blobServer - statter *blobStatter // global statter service. - blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider - deleteEnabled bool - resumableDigestEnabled bool -} - -// RegistryOption is the type used for functional options for NewRegistry. -type RegistryOption func(*registry) error - -// EnableRedirect is a functional option for NewRegistry. It causes the backend -// blob server to attempt using (StorageDriver).URLFor to serve all blobs. -func EnableRedirect(registry *registry) error { - registry.blobServer.redirect = true - return nil -} - -// EnableDelete is a functional option for NewRegistry. It enables deletion on -// the registry. -func EnableDelete(registry *registry) error { - registry.deleteEnabled = true - return nil -} - -// DisableDigestResumption is a functional option for NewRegistry. It should be -// used if the registry is acting as a caching proxy. -func DisableDigestResumption(registry *registry) error { - registry.resumableDigestEnabled = false - return nil -} - -// BlobDescriptorCacheProvider returns a functional option for -// NewRegistry. It creates a cached blob statter for use by the -// registry. 
-func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { - // TODO(aaronl): The duplication of statter across several objects is - // ugly, and prevents us from using interface types in the registry - // struct. Ideally, blobStore and blobServer should be lazily - // initialized, and use the current value of - // blobDescriptorCacheProvider. - return func(registry *registry) error { - if blobDescriptorCacheProvider != nil { - statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) - registry.blobStore.statter = statter - registry.blobServer.statter = statter - registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider - } - return nil - } -} - -// NewRegistry creates a new registry instance from the provided driver. The -// resulting registry may be shared by multiple goroutines but is cheap to -// allocate. If the Redirect option is specified, the backend blob server will -// attempt to use (StorageDriver).URLFor to serve all blobs. -func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { - // create global statter - statter := &blobStatter{ - driver: driver, - } - - bs := &blobStore{ - driver: driver, - statter: statter, - } - - registry := ®istry{ - blobStore: bs, - blobServer: &blobServer{ - driver: driver, - statter: statter, - pathFn: bs.path, - }, - statter: statter, - resumableDigestEnabled: true, - } - - for _, option := range options { - if err := option(registry); err != nil { - return nil, err - } - } - - return registry, nil -} - -// Scope returns the namespace scope for a registry. The registry -// will only serve repositories contained within this scope. -func (reg *registry) Scope() distribution.Scope { - return distribution.GlobalScope -} - -// Repository returns an instance of the repository tied to the registry. -// Instances should not be shared between goroutines but are cheap to -// allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { - var descriptorCache distribution.BlobDescriptorService - if reg.blobDescriptorCacheProvider != nil { - var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) - if err != nil { - return nil, err - } - } - - return &repository{ - ctx: ctx, - registry: reg, - name: canonicalName, - descriptorCache: descriptorCache, - }, nil -} - -// repository provides name-scoped access to various services. -type repository struct { - *registry - ctx context.Context - name reference.Named - descriptorCache distribution.BlobDescriptorService -} - -// Name returns the name of the repository. -func (repo *repository) Name() reference.Named { - return repo.name -} - -func (repo *repository) Tags(ctx context.Context) distribution.TagService { - tags := &tagStore{ - repository: repo, - blobStore: repo.registry.blobStore, - } - - return tags -} - -// Manifests returns an instance of ManifestService. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. 
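The registry above is assembled from functional options and hands out request-scoped repositories like the one whose Manifests method follows. A sketch of the wiring, mirroring the setup in the deleted tagstore_test.go and assuming the same vendored packages:

package main

import (
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()

	// Options toggle optional behavior on the registry being built.
	reg, err := storage.NewRegistry(ctx, inmemory.New(),
		storage.EnableDelete,   // allow blob and manifest deletion
		storage.EnableRedirect, // serve blobs via URLFor redirects
	)
	if err != nil {
		panic(err)
	}

	// Repositories are request-scoped and cheap to allocate.
	named, _ := reference.ParseNamed("a/b")
	repo, err := reg.Repository(ctx, named)
	if err != nil {
		panic(err)
	}
	_ = repo.Tags(ctx)
}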
-func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - manifestLinkPathFns := []linkPathFunc{ - // NOTE(stevvooe): Need to search through multiple locations since - // 2.1.0 unintentionally linked into _layers. - manifestRevisionLinkPath, - blobLinkPath, - } - - blobStore := &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - }, - - // TODO(stevvooe): linkPath limits this blob store to only - // manifests. This instance cannot be used for blob checks. - linkPathFns: manifestLinkPathFns, - } - - ms := &manifestStore{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - schema1Handler: &signedManifestHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - signatures: &signatureStore{ - ctx: ctx, - repository: repo, - blobStore: repo.blobStore, - }, - }, - schema2Handler: &schema2ManifestHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - }, - manifestListHandler: &manifestListHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - }, - } - - // Apply options - for _, option := range options { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - - return ms, nil -} - -// Blobs returns an instance of the BlobStore. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. -func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { - var statter distribution.BlobDescriptorService = &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: []linkPathFunc{blobLinkPath}, - } - - if repo.descriptorCache != nil { - statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) - } - - return &linkedBlobStore{ - registry: repo.registry, - blobStore: repo.blobStore, - blobServer: repo.blobServer, - blobAccessController: statter, - repository: repo, - ctx: ctx, - - // TODO(stevvooe): linkPath limits this blob store to only layers. - // This instance cannot be used for manifest checks. - linkPathFns: []linkPathFunc{blobLinkPath}, - deleteEnabled: repo.registry.deleteEnabled, - resumableDigestEnabled: repo.resumableDigestEnabled, - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go deleted file mode 100644 index 115786e2..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go +++ /dev/null @@ -1,99 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema2" -) - -//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. 
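Both this schema2 handler and the schema1 signedManifestHandler later in this diff assert `var _ ManifestHandler = ...`, so the contract they satisfy can be reconstructed from their shared method set; the canonical definition lived elsewhere in the deleted storage package:

// Reconstructed for reference; assumes the context, digest, and
// distribution imports used throughout these files.
type ManifestHandler interface {
	// Unmarshal turns stored manifest bytes back into a distribution.Manifest.
	Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error)
	// Put verifies, stores, and links a manifest, returning its digest.
	Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error)
}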
-type schema2ManifestHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -var _ ManifestHandler = &schema2ManifestHandler{} - -func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") - - var m schema2.DeserializedManifest - if err := json.Unmarshal(content, &m); err != nil { - return nil, err - } - - return &m, nil -} - -func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") - - m, ok := manifest.(*schema2.DeserializedManifest) - if !ok { - return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) - } - - if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { - return "", err - } - - mt, payload, err := m.Payload() - if err != nil { - return "", err - } - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. As a policy, the registry only tries to store -// valid content, leaving trust policies of that content up to consumers. -func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if !skipDependencyVerification { - target := mnfst.Target() - _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) - } - - for _, fsLayer := range mnfst.References() { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/signaturestore.go b/vendor/github.com/docker/distribution/registry/storage/signaturestore.go deleted file mode 100644 index 205d6009..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/signaturestore.go +++ /dev/null @@ -1,131 +0,0 @@ -package storage - -import ( - "path" - "sync" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -type signatureStore struct { - repository *repository - blobStore *blobStore - ctx context.Context -} - -func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { - signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: s.repository.Name().Name(), - revision: dgst, - }) - - if err != nil { - return nil, err - } - - // Need to append signature digest algorithm to path to get all items. 
- // Perhaps, this should be in the pathMapper but it feels awkward. This - // can be eliminated by implementing listAll on drivers. - signaturesPath = path.Join(signaturesPath, "sha256") - - signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath) - if err != nil { - return nil, err - } - - var wg sync.WaitGroup - type result struct { - index int - signature []byte - err error - } - ch := make(chan result) - - bs := s.linkedBlobStore(s.ctx, dgst) - for i, sigPath := range signaturePaths { - sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) - if err != nil { - context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath) - continue - } - - wg.Add(1) - go func(idx int, sigdgst digest.Digest) { - defer wg.Done() - context.GetLogger(s.ctx). - Debugf("fetching signature %q", sigdgst) - - r := result{index: idx} - - if p, err := bs.Get(s.ctx, sigdgst); err != nil { - context.GetLogger(s.ctx). - Errorf("error fetching signature %q: %v", sigdgst, err) - r.err = err - } else { - r.signature = p - } - - ch <- r - }(i, sigdgst) - } - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - // aggregrate the results - signatures := make([][]byte, len(signaturePaths)) -loop: - for { - select { - case result := <-ch: - signatures[result.index] = result.signature - if result.err != nil && err == nil { - // only set the first one. - err = result.err - } - case <-done: - break loop - } - } - - return signatures, err -} - -func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { - bs := s.linkedBlobStore(s.ctx, dgst) - for _, signature := range signatures { - if _, err := bs.Put(s.ctx, "application/json", signature); err != nil { - return err - } - } - return nil -} - -// linkedBlobStore returns the namedBlobStore of the signatures for the -// manifest with the given digest. Effectively, each signature link path -// layout is a unique linked blob store. -func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { - linkpath := func(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestSignatureLinkPathSpec{ - name: name, - revision: revision, - signature: dgst, - }) - - } - - return &linkedBlobStore{ - ctx: ctx, - repository: s.repository, - blobStore: s.blobStore, - blobAccessController: &linkedBlobStatter{ - blobStore: s.blobStore, - repository: s.repository, - linkPathFns: []linkPathFunc{linkpath}, - }, - linkPathFns: []linkPathFunc{linkpath}, - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go deleted file mode 100644 index 02663226..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go +++ /dev/null @@ -1,150 +0,0 @@ -package storage - -import ( - "encoding/json" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" -) - -// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It -// can unmarshal and put schema1 manifests that have been signed by libtrust. 
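The Get method above fans out one goroutine per signature, funnels results back through an unbuffered channel, and closes a done channel once the WaitGroup drains; because each send completes before its deferred Done runs, no result can be lost. The same fan-out/fan-in pattern, stripped down and self-contained:

package main

import (
	"fmt"
	"sync"
)

func fetchAll(keys []string, fetch func(string) ([]byte, error)) ([][]byte, error) {
	type result struct {
		index int
		data  []byte
		err   error
	}

	var wg sync.WaitGroup
	ch := make(chan result)
	for i, k := range keys {
		wg.Add(1)
		go func(idx int, key string) {
			defer wg.Done() // runs only after the send below completes
			r := result{index: idx}
			r.data, r.err = fetch(key)
			ch <- r
		}(i, k)
	}

	done := make(chan struct{})
	go func() { wg.Wait(); close(done) }()

	out := make([][]byte, len(keys))
	var firstErr error
loop:
	for {
		select {
		case r := <-ch:
			out[r.index] = r.data
			if r.err != nil && firstErr == nil {
				firstErr = r.err // keep only the first error, as the original does
			}
		case <-done:
			break loop
		}
	}
	return out, firstErr
}

func main() {
	data, err := fetchAll([]string{"a", "b"}, func(k string) ([]byte, error) {
		return []byte(k), nil
	})
	fmt.Println(data, err)
}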
-type signedManifestHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context - signatures *signatureStore -} - -var _ ManifestHandler = &signedManifestHandler{} - -func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") - // Fetch the signatures for the manifest - signatures, err := ms.signatures.Get(dgst) - if err != nil { - return nil, err - } - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - return &sm, nil -} - -func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") - - sm, ok := manifest.(*schema1.SignedManifest) - if !ok { - return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) - } - - if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { - return "", err - } - - mt := schema1.MediaTypeManifest - payload := sm.Canonical - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - // Grab each json signature and store them. - signatures, err := sm.Signatures() - if err != nil { - return "", err - } - - if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. 
-func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if len(mnfst.Name) > reference.NameTotalLengthMax { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), - }) - } - - if !reference.NameRegexp.MatchString(mnfst.Name) { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("invalid manifest name format"), - }) - } - - if len(mnfst.History) != len(mnfst.FSLayers) { - errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", - len(mnfst.History), len(mnfst.FSLayers))) - } - - if _, err := schema1.Verify(&mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, distribution.ErrManifestUnverified{}) - default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } - } - } - - if !skipDependencyVerification { - for _, fsLayer := range mnfst.References() { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go deleted file mode 100644 index 8381d244..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/tagstore.go +++ /dev/null @@ -1,191 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var _ distribution.TagService = &tagStore{} - -// tagStore provides methods to manage manifest tags in a backend storage driver. -// This implementation uses the same on-disk layout as the (now deleted) tag -// store. This provides backward compatibility with current registry deployments -// which only makes use of the Digest field of the returned distribution.Descriptor -// but does not enable full roundtripping of Descriptor objects -type tagStore struct { - repository *repository - blobStore *blobStore -} - -// All returns all tags -func (ts *tagStore) All(ctx context.Context) ([]string, error) { - var tags []string - - pathSpec, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name().Name(), - }) - if err != nil { - return tags, err - } - - entries, err := ts.blobStore.driver.List(ctx, pathSpec) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name().Name()} - default: - return tags, err - } - } - - for _, entry := range entries { - _, filename := path.Split(entry) - tags = append(tags, filename) - } - - return tags, nil -} - -// exists returns true if the specified manifest tag exists in the repository. 
-func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { - tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), - tag: tag, - }) - - if err != nil { - return false, err - } - - exists, err := exists(ctx, ts.blobStore.driver, tagPath) - if err != nil { - return false, err - } - - return exists, nil -} - -// Tag tags the digest with the given tag, updating the the store to point at -// the current tag. The digest must point to a manifest. -func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), - tag: tag, - }) - - if err != nil { - return err - } - - lbs := ts.linkedBlobStore(ctx, tag) - - // Link into the index - if err := lbs.linkBlob(ctx, desc); err != nil { - return err - } - - // Overwrite the current link - return ts.blobStore.link(ctx, currentPath, desc.Digest) -} - -// resolve the current revision for name and tag. -func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), - tag: tag, - }) - - if err != nil { - return distribution.Descriptor{}, err - } - - revision, err := ts.blobStore.readlink(ctx, currentPath) - if err != nil { - switch err.(type) { - case storagedriver.PathNotFoundError: - return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} - } - - return distribution.Descriptor{}, err - } - - return distribution.Descriptor{Digest: revision}, nil -} - -// Untag removes the tag association -func (ts *tagStore) Untag(ctx context.Context, tag string) error { - tagPath, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name().Name(), - tag: tag, - }) - - switch err.(type) { - case storagedriver.PathNotFoundError: - return distribution.ErrTagUnknown{Tag: tag} - case nil: - break - default: - return err - } - - return ts.blobStore.driver.Delete(ctx, tagPath) -} - -// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one -// to index manifest blobs by tag name. While the tag store doesn't map -// precisely to the linked blob store, using this ensures the links are -// managed via the same code path. -func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { - return &linkedBlobStore{ - blobStore: ts.blobStore, - repository: ts.repository, - ctx: ctx, - linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestTagIndexEntryLinkPathSpec{ - name: name, - tag: tag, - revision: dgst, - }) - - }}, - } -} - -// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by -// digest, tag entries which point to it need to be recovered to avoid dangling tags. 
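Together these methods form the TagService round trip that the deleted tagstore_test.go exercises. A sketch, assuming an already-constructed repository (see the registry example earlier in this diff) and the same vendored packages:

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

func demoTags(ctx context.Context, repo distribution.Repository) error {
	tags := repo.Tags(ctx)
	desc := distribution.Descriptor{
		Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
	}

	// Point "latest" at a manifest digest.
	if err := tags.Tag(ctx, "latest", desc); err != nil {
		return err
	}

	// Resolve the tag back to a descriptor.
	got, err := tags.Get(ctx, "latest")
	if err != nil {
		return err
	}

	// Find every tag referencing this digest (used when deleting by digest).
	aliases, err := tags.Lookup(ctx, got)
	if err != nil {
		return err
	}
	_ = aliases

	// Remove the tag association without touching the manifest blob.
	return tags.Untag(ctx, "latest")
}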
-func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { - allTags, err := ts.All(ctx) - switch err.(type) { - case distribution.ErrRepositoryUnknown: - // This tag store has been initialized but not yet populated - break - case nil: - break - default: - return nil, err - } - - var tags []string - for _, tag := range allTags { - tagLinkPathSpec := manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), - tag: tag, - } - - tagLinkPath, err := pathFor(tagLinkPathSpec) - tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) - if err != nil { - return nil, err - } - - if tagDigest == desc.Digest { - tags = append(tags, tag) - } - } - - return tags, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go deleted file mode 100644 index 52873a69..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package storage - -import ( - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -type tagsTestEnv struct { - ts distribution.TagService - ctx context.Context -} - -func testTagStore(t *testing.T) *tagsTestEnv { - ctx := context.Background() - d := inmemory.New() - reg, err := NewRegistry(ctx, d) - if err != nil { - t.Fatal(err) - } - - repoRef, _ := reference.ParseNamed("a/b") - repo, err := reg.Repository(ctx, repoRef) - if err != nil { - t.Fatal(err) - } - - return &tagsTestEnv{ - ctx: ctx, - ts: repo.Tags(ctx), - } -} - -func TestTagStoreTag(t *testing.T) { - env := testTagStore(t) - tags := env.ts - ctx := env.ctx - - d := distribution.Descriptor{} - err := tags.Tag(ctx, "latest", d) - if err == nil { - t.Errorf("unexpected error putting malformed descriptor : %s", err) - } - - d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - err = tags.Tag(ctx, "latest", d) - if err != nil { - t.Error(err) - } - - d1, err := tags.Get(ctx, "latest") - if err != nil { - t.Error(err) - } - - if d1.Digest != d.Digest { - t.Error("put and get digest differ") - } - - // Overwrite existing - d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" - err = tags.Tag(ctx, "latest", d) - if err != nil { - t.Error(err) - } - - d1, err = tags.Get(ctx, "latest") - if err != nil { - t.Error(err) - } - - if d1.Digest != d.Digest { - t.Error("put and get digest differ") - } -} - -func TestTagStoreUnTag(t *testing.T) { - env := testTagStore(t) - tags := env.ts - ctx := env.ctx - desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} - - err := tags.Untag(ctx, "latest") - if err == nil { - t.Errorf("Expected error untagging non-existant tag") - } - - err = tags.Tag(ctx, "latest", desc) - if err != nil { - t.Error(err) - } - - err = tags.Untag(ctx, "latest") - if err != nil { - t.Error(err) - } - - _, err = tags.Get(ctx, "latest") - if err == nil { - t.Error("Expected error getting untagged tag") - } -} - -func TestTagStoreAll(t *testing.T) { - env := testTagStore(t) - tagStore := env.ts - ctx := env.ctx - - alpha := "abcdefghijklmnopqrstuvwxyz" - for i := 0; i < len(alpha); i++ { - tag := alpha[i] - desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} - err := 
tagStore.Tag(ctx, string(tag), desc) - if err != nil { - t.Error(err) - } - } - - all, err := tagStore.All(ctx) - if err != nil { - t.Error(err) - } - if len(all) != len(alpha) { - t.Errorf("Unexpected count returned from enumerate") - } - - for i, c := range all { - if c != string(alpha[i]) { - t.Errorf("unexpected tag in enumerate %s", c) - } - } - - removed := "a" - err = tagStore.Untag(ctx, removed) - if err != nil { - t.Error(err) - } - - all, err = tagStore.All(ctx) - if err != nil { - t.Error(err) - } - for _, tag := range all { - if tag == removed { - t.Errorf("unexpected tag in enumerate %s", removed) - } - } - -} - -func TestTagLookup(t *testing.T) { - env := testTagStore(t) - tagStore := env.ts - ctx := env.ctx - - descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} - desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} - - tags, err := tagStore.Lookup(ctx, descA) - if err != nil { - t.Fatal(err) - } - if len(tags) != 0 { - t.Fatalf("Lookup returned > 0 tags from empty store") - } - - err = tagStore.Tag(ctx, "a", descA) - if err != nil { - t.Fatal(err) - } - - err = tagStore.Tag(ctx, "b", descA) - if err != nil { - t.Fatal(err) - } - - err = tagStore.Tag(ctx, "0", desc0) - if err != nil { - t.Fatal(err) - } - - err = tagStore.Tag(ctx, "1", desc0) - if err != nil { - t.Fatal(err) - } - - tags, err = tagStore.Lookup(ctx, descA) - if err != nil { - t.Fatal(err) - } - - if len(tags) != 2 { - t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) - } - - tags, err = tagStore.Lookup(ctx, desc0) - if err != nil { - t.Fatal(err) - } - - if len(tags) != 2 { - t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags)) - } - -} diff --git a/vendor/github.com/docker/distribution/registry/storage/util.go b/vendor/github.com/docker/distribution/registry/storage/util.go deleted file mode 100644 index 773d7ba0..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/util.go +++ /dev/null @@ -1,21 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// Exists provides a utility method to test whether or not a path exists in -// the given driver. -func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { - if _, err := drv.Stat(ctx, path); err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return false, nil - default: - return false, err - } - } - - return true, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/vacuum.go b/vendor/github.com/docker/distribution/registry/storage/vacuum.go deleted file mode 100644 index 60d5a2fa..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/vacuum.go +++ /dev/null @@ -1,65 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// vacuum contains functions for cleaning up repositories and blobs -// These functions will only reliably work on strongly consistent -// storage systems. 
-// https://en.wikipedia.org/wiki/Consistency_model - -// NewVacuum creates a new Vacuum -func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { - return Vacuum{ - ctx: ctx, - driver: driver, - } -} - -// Vacuum removes content from the filesystem -type Vacuum struct { - driver driver.StorageDriver - ctx context.Context -} - -// RemoveBlob removes a blob from the filesystem -func (v Vacuum) RemoveBlob(dgst string) error { - d, err := digest.ParseDigest(dgst) - if err != nil { - return err - } - - blobPath, err := pathFor(blobDataPathSpec{digest: d}) - if err != nil { - return err - } - context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) - err = v.driver.Delete(v.ctx, blobPath) - if err != nil { - return err - } - - return nil -} - -// RemoveRepository removes a repository directory from the -// filesystem -func (v Vacuum) RemoveRepository(repoName string) error { - rootForRepository, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return err - } - repoDir := path.Join(rootForRepository, repoName) - context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) - err = v.driver.Delete(v.ctx, repoDir) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk.go b/vendor/github.com/docker/distribution/registry/storage/walk.go deleted file mode 100644 index d979796e..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/walk.go +++ /dev/null @@ -1,59 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "sort" - - "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" -) - -// ErrSkipDir is used as a return value from onFileFunc to indicate that -// the directory named in the call is to be skipped. It is not returned -// as an error by any function. -var ErrSkipDir = errors.New("skip this directory") - -// WalkFn is called once per file by Walk -// If the returned error is ErrSkipDir and fileInfo refers -// to a directory, the directory will not be entered and Walk -// will continue the traversal. Otherwise Walk will return -type WalkFn func(fileInfo storageDriver.FileInfo) error - -// Walk traverses a filesystem defined within driver, starting -// from the given path, calling f on each file -func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { - children, err := driver.List(ctx, from) - if err != nil { - return err - } - sort.Stable(sort.StringSlice(children)) - for _, child := range children { - // TODO(stevvooe): Calling driver.Stat for every entry is quite - // expensive when running against backends with a slow Stat - // implementation, such as s3. This is very likely a serious - // performance bottleneck. 
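Callers of the Walk function being removed here prune whole subtrees by returning ErrSkipDir from the callback, which is how the purge code earlier in this diff avoids descending into reserved directories. A usage sketch, assuming the same vendored packages (the "_trash" directory name is hypothetical):

import (
	"path"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver"
)

func listFiles(ctx context.Context, d driver.StorageDriver) ([]string, error) {
	var files []string
	err := storage.Walk(ctx, d, "/", func(fi driver.FileInfo) error {
		if fi.IsDir() {
			// Skip any directory named "_trash" without descending into it.
			if path.Base(fi.Path()) == "_trash" {
				return storage.ErrSkipDir
			}
			return nil
		}
		files = append(files, fi.Path())
		return nil
	})
	return files, err
}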
- fileInfo, err := driver.Stat(ctx, child) - if err != nil { - return err - } - err = f(fileInfo) - skipDir := (err == ErrSkipDir) - if err != nil && !skipDir { - return err - } - - if fileInfo.IsDir() && !skipDir { - if err := Walk(ctx, driver, child, f); err != nil { - return err - } - } - } - return nil -} - -// pushError formats an error type given a path and an error -// and pushes it to a slice of errors -func pushError(errors []error, path string, err error) []error { - return append(errors, fmt.Errorf("%s: %s", path, err)) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk_test.go b/vendor/github.com/docker/distribution/registry/storage/walk_test.go deleted file mode 100644 index 42f67dba..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/walk_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package storage - -import ( - "fmt" - "sort" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { - d := inmemory.New() - ctx := context.Background() - - expected := map[string]string{ - "/a": "dir", - "/a/b": "dir", - "/a/b/c": "dir", - "/a/b/c/d": "file", - "/a/b/c/e": "file", - "/a/b/f": "dir", - "/a/b/f/g": "file", - "/a/b/f/h": "file", - "/a/b/f/i": "file", - "/z": "dir", - "/z/y": "file", - } - - for p, typ := range expected { - if typ != "file" { - continue - } - - if err := d.PutContent(ctx, p, []byte(p)); err != nil { - t.Fatalf("unable to put content into fixture: %v", err) - } - } - - return d, expected, ctx -} - -func TestWalkErrors(t *testing.T) { - d, expected, ctx := testFS(t) - fileCount := len(expected) - err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error { - return nil - }) - if err == nil { - t.Error("Expected invalid root err") - } - - errEarlyExpected := fmt.Errorf("Early termination") - - err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { - // error on the 2nd file - if fileInfo.Path() == "/a/b" { - return errEarlyExpected - } - - delete(expected, fileInfo.Path()) - return nil - }) - if len(expected) != fileCount-1 { - t.Error("Walk failed to terminate with error") - } - if err != errEarlyExpected { - if err == nil { - t.Fatalf("expected an error due to early termination") - } else { - t.Error(err.Error()) - } - } - - err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { - return nil - }) - if err == nil { - t.Errorf("Expected missing file err") - } - -} - -func TestWalk(t *testing.T) { - d, expected, ctx := testFS(t) - var traversed []string - err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - filetype, ok := expected[filePath] - if !ok { - t.Fatalf("Unexpected file in walk: %q", filePath) - } - - if fileInfo.IsDir() { - if filetype != "dir" { - t.Errorf("Unexpected file type: %q", filePath) - } - } else { - if filetype != "file" { - t.Errorf("Unexpected file type: %q", filePath) - } - - // each file has its own path as the contents. If the length - // doesn't match the path length, fail. 
- if fileInfo.Size() != int64(len(fileInfo.Path())) { - t.Fatalf("unexpected size for %q: %v != %v", - fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path())) - } - } - delete(expected, filePath) - traversed = append(traversed, filePath) - return nil - }) - if len(expected) > 0 { - t.Errorf("Missed files in walk: %q", expected) - } - - if !sort.StringsAreSorted(traversed) { - t.Errorf("result should be sorted: %v", traversed) - } - - if err != nil { - t.Fatalf(err.Error()) - } -} - -func TestWalkSkipDir(t *testing.T) { - d, expected, ctx := testFS(t) - err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - if filePath == "/a/b" { - // skip processing /a/b/c and /a/b/c/d - return ErrSkipDir - } - delete(expected, filePath) - return nil - }) - if err != nil { - t.Fatalf(err.Error()) - } - if _, ok := expected["/a/b/c"]; !ok { - t.Errorf("/a/b/c not skipped") - } - if _, ok := expected["/a/b/c/d"]; !ok { - t.Errorf("/a/b/c/d not skipped") - } - if _, ok := expected["/a/b/c/e"]; !ok { - t.Errorf("/a/b/c/e not skipped") - } - -} diff --git a/vendor/github.com/docker/distribution/uuid/uuid_test.go b/vendor/github.com/docker/distribution/uuid/uuid_test.go deleted file mode 100644 index 09c3a7bb..00000000 --- a/vendor/github.com/docker/distribution/uuid/uuid_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package uuid - -import ( - "testing" -) - -const iterations = 1000 - -func TestUUID4Generation(t *testing.T) { - for i := 0; i < iterations; i++ { - u := Generate() - - if u[6]&0xf0 != 0x40 { - t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) - } - - if u[8]&0xc0 != 0x80 { - t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) - } - } -} - -func TestParseAndEquality(t *testing.T) { - for i := 0; i < iterations; i++ { - u := Generate() - - parsed, err := Parse(u.String()) - if err != nil { - t.Fatalf("error parsing uuid %v: %v", u, err) - } - - if parsed != u { - t.Fatalf("parsing round trip failed: %v != %v", parsed, u) - } - } - - for _, c := range []string{ - "bad", - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format - " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space - "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space - "00000000-0000-0000-0000-x00000000000", // out of range character - } { - if _, err := Parse(c); err == nil { - t.Fatalf("parsing %q should have failed", c) - } - } -} diff --git a/vendor/github.com/docker/docker/.dockerignore b/vendor/github.com/docker/docker/.dockerignore index 37abdef4..6e43c2a9 100644 --- a/vendor/github.com/docker/docker/.dockerignore +++ b/vendor/github.com/docker/docker/.dockerignore @@ -1,2 +1,4 @@ -bundles -.gopath +./bin +./.dapper +./dist +./.trash-cache diff --git a/vendor/github.com/docker/docker/.gitignore b/vendor/github.com/docker/docker/.gitignore index fd91afdf..d43bb90f 100644 --- a/vendor/github.com/docker/docker/.gitignore +++ b/vendor/github.com/docker/docker/.gitignore @@ -1,39 +1,4 @@ -# Docker project generated files to ignore -# if you want to ignore files created by your editor/tools, -# please consider a global .gitignore https://help.github.com/articles/ignoring-files -*.exe -*.exe~ -*.orig -*.rej -*.test -.*.swp -.DS_Store -.bashrc -.dotcloud -.flymake* -.git/ -.gopath/ -.hg/ -.vagrant* -Vagrantfile -a.out -autogen/ -bin -build_src -bundles/ -docker/docker -dockerversion/version_autogen.go -docs/AWS_S3_BUCKET -docs/GITCOMMIT -docs/GIT_BRANCH -docs/VERSION -docs/_build -docs/_static -docs/_templates 
-docs/changed-files -# generated by man/md2man-all.sh -man/man1 -man/man5 -man/man8 -pyenv -vendor/pkg/ +/.dapper +/bin +*.swp +/.trash-cache diff --git a/vendor/github.com/docker/docker/.mailmap b/vendor/github.com/docker/docker/.mailmap deleted file mode 100644 index 8348b4a8..00000000 --- a/vendor/github.com/docker/docker/.mailmap +++ /dev/null @@ -1,171 +0,0 @@ -# Generate AUTHORS: hack/generate-authors.sh - -# Tip for finding duplicates (besides scanning the output of AUTHORS for name -# duplicates that aren't also email duplicates): scan the output of: -# git log --format='%aE - %aN' | sort -uf -# -# For explanation on this file format: man git-shortlog - -Patrick Stapleton -Shishir Mahajan -Erwin van der Koogh -Ahmed Kamal -Tejesh Mehta -Cristian Staretu -Cristian Staretu -Cristian Staretu -Marcus Linke -Aleksandrs Fadins -Christopher Latham -Hu Keping -Wayne Chang -Chen Chao -Daehyeok Mun - - - - - - -Guillaume J. Charmes - - - - - -Thatcher Peskens -Thatcher Peskens -Thatcher Peskens dhrp -Jérôme Petazzoni jpetazzo -Jérôme Petazzoni -Joffrey F -Joffrey F -Joffrey F -Tim Terhorst -Andy Smith - - - - - - - - - -Walter Stanish - -Roberto Hashioka -Konstantin Pelykh -David Sissitka -Nolan Darilek - -Benoit Chesneau -Jordan Arentsen -Daniel Garcia -Miguel Angel Fernández -Bhiraj Butala -Faiz Khan -Victor Lyuboslavsky -Jean-Baptiste Barth -Matthew Mueller - -Shih-Yuan Lee -Daniel Mizyrycki root -Jean-Baptiste Dalido - - - - - - - - - - - - - - -Sven Dowideit -Sven Dowideit -Sven Dowideit -Sven Dowideit <¨SvenDowideit@home.org.au¨> -Sven Dowideit -Sven Dowideit - -Alexandr Morozov - -O.S. Tezer - -Roberto G. Hashioka - - - - - -Sridhar Ratnakumar -Sridhar Ratnakumar -Liang-Chi Hsieh -Aleksa Sarai -Will Weaver -Timothy Hobbs -Nathan LeClaire -Nathan LeClaire - - - - -Matthew Heon - - - -Francisco Carriedo - - - - - -Brian Goff - - - -Hollie Teal - - - -Jessica Frazelle Jessie Frazelle - - - - - -Thomas LEVEIL Thomas LÉVEIL - - -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Darren Shepherd -Deshi Xiao -Deshi Xiao -Doug Davis -Jacob Atzen -Jeff Nickoloff - -John Howard (VM) John Howard -Madhu Venugopal -Mary Anthony -Mary Anthony moxiegirl -Mary Anthony -mattyw -resouer -AJ Bowen soulshake -AJ Bowen soulshake -Tibor Vass -Tibor Vass -Vincent Bernat -Yestin Sun -bin liu -John Howard (VM) jhowardmsft -Ankush Agarwal -Tangi COLIN tangicolin diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index cfc67e32..00000000 --- a/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,1033 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. 
- -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Welch -Abel Muiño -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Miller -Adam Singer -Aditya -Adria Casas -Adrian Mouat -Adrien Folie -Ahmed Kamal -Ahmet Alp Balkan -Aidan Hobson Sayers -AJ Bowen -Al Tobey -alambike -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Gaynor -Alex Warhawk -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandr Morozov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Allen Madsen -almoehi -Alvin Richards -amangoel -Amit Bakshi -Amy Lindburg -Anand Patil -AnandkumarPatel -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrew C. Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Kuklewicz -Andrew Macgregor -Andrew Martin -Andrew Munsell -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Ankush Agarwal -Anthony Baire -Anthony Bishopric -Anton Löfgren -Anton Nikitin -Anton Tiurin -Antonio Murdaca -Antony Messerli -apocas -ArikaChen -Arnaud Porterie -Arthur Barr -Arthur Gautier -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -bdevloed -Ben Firshman -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -bin liu -Blake Geno -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Kochendorfer -Brian (bex) Exelbierd -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Carl X. Su -Cary -Casey Bisson -Charles Hooper -Charles Lindsay -Charles Merriam -Charlie Lewis -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Chris Alfonso -Chris Armstrong -Chris Khoo -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Wahl -chrismckinnel -Christian Berendt -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Troestler -Christopher Currie -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. 
Costa -Clayton Coleman -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Colm Hally -Cory Forsyth -cressie176 -Cristian Staretu -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Griffin -Dan Hirsch -Dan Keder -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Dave Henderson -David Anderson -David Calavera -David Corking -David Davis -David Gageot -David Gebler -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sissitka -David Xia -David Young -Davide Ceretti -Dawn Chen -decadent -Deng Guangxing -Deni Bertovic -Derek -Derek -Derek McGowan -Deric Crago -Deshi Xiao -Dinesh Subhraveti -DiuDiugirl -Djibril Koné -dkumor -Dmitry Demeshchuk -Dmitry Gusev -Dmitry V. Krivenok -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Doug Davis -Doug MacEachern -doug tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivind Uggedal -Elias Probst -Elijah Zupancic -eluck -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Windisch -Eric-Olivier Lamey -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evgeny Vereshchagin -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rehm -Fabrizio Regini -Faiz Khan -falmp -Fareed Dudhia -Felix Rabe -Felix Schindler -Ferenc Szabo -Fernando -Filipe Brandenburger -Flavio Castelli -FLGMwt -Florian Weingarten -Francisco Carriedo -Francisco Souza -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. Kautz IV -Frederik Loeffert -Freek Kalter -Félix Baylac-Jacqué -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -Galen Sampson -Gareth Rushgrove -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -Goffert van Gool -golubbe -Gosuke Miyashita -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -Günter Zöchbauer -Hans Rødtang -Harald Albers -Harley Laue -Harry Zhang -He Simei -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -Hu Keping -Hu Tao -Huayi Zhang -Hugo Duncan -Hunter Blanks -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Main -Ian Truslove -Iavael -Igor Dolzhikov -ILYA Khlopotov -imre Fitos -inglesp -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Fraixedes -J Bruni -J. 
Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carr -James DeFelice -James Harrison Fisher -James Kyle -James Lal -James Mills -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Jaap Driessen -Jana Radhakrishnan -Jared Biel -Jaroslaw Zabiello -jaseg -Jason Divock -Jason Giedymin -Jason Hall -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -Jay -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Lindsay -Jeff Nickoloff -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jeremy Grosser -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joe Beda -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Tims -John Warwick -John Willis -Jon Wedaman -Jonas Pfenniger -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Joost Cassee -Jordan Arentsen -Jordan Sissel -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Hawn -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -Julian Taylor -Julien Barbier -Julien Bordellier -Julien Dubois -Jun-Ru Chang -Justin Force -Justin Plock -Justin Simonelis -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kamil Domanski -Karan Lyons -kargakis -Karl Grzeszczak -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Keli Hu -Ken Cochrane -Ken ICHIKAWA -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Clark -Kevin J. Lynagh -Kevin Menard -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kimbro Staken -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Konrad Kleine -Konstantin Pelykh -Krasimir Georgiev -krrg -Kyle Conroy -kyu -Lachlan Coote -Lajos Papp -Lakshan Perera -lalyos -Lance Chen -Lance Kinley -Lars Kellogg-Stedman -Lars R. 
Damerow -Laurie Voss -leeplay -Lei Jitang -Len Weincier -Leszek Kowalski -Levi Gross -Lewis Marshall -Lewis Peckover -Liana Lo -Liang-Chi Hsieh -limsy -Liu Hua -Lloyd Dewolf -Lokesh Mandvekar -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luis Martínez de Bartolomé Izquierdo -lukaspustina -lukemarsden -Lénaïc Huard -Ma Shimiao -Mabin -Madhu Venugopal -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manuel Meurer -Manuel Woelker -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna -Marius Voila -Mark Allen -Mark McGranaghan -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt McCormick -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -mattymo -mattyw -mauriyouth -Max Shytikov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mengdi Gao -Mert Yazıcıoğlu -Michael A. Smith -Michael Brown -Michael Chiang -Michael Crosby -Michael Gorsuch -Michael Hudson-Doyle -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Jemala -Michal Minar -Michaël Pailloncy -Michiel@unhosted -Miguel Angel Fernández -Mihai Borobocea -Mike Chelen -Mike Dillon -Mike Gaffney -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -Mikhail Sobolev -Mingzhen Feng -Mitch Capper -Mohit Soni -Morgante Pell -Morten Siebuhr -Moysés Borges -Mrunal Patel -mschurenko -Mustafa Akın -Médi-Rémi Hashim -Nan Monnand Deng -Naoki Orii -Natalie Parker -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Neal McBurnett -Nelson Chen -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -NikolaMandic -nikolas -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ole Reifschneider -Olivier Gambier -pandrew -panticz -Pascal Borreli -Pascal Hartig -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Morie -Paul Nasrat -Paul Weaver -Pavel Lobashov -Pavel Tikhomirov -Pavlos Ratis -Peggy Li -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Ericson -Peter Esbensen -Peter Salvatore -Peter Volpe -Peter Waller -Phil -Phil Estes -Phil Spitler -Philipp Weissensteiner -Phillip Alexander -Piergiuliano Bossi -Pierre -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Pradeep Chhetri -Prasanna Gautam -Przemek Hejman -pysqz -Qiang Huang -Quentin Brossard -r0n22 -Rafal Jeczalik -Rafe Colton -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralph Bean -Ramkumar Ramachandra -Ramon van Alteren -Recursive Madman -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Metzler -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Robert Bachmann -Robert Bittle -Robert Obryk -Roberto G. 
Hashioka -Robin Speekenbrink -robpc -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Roland Huß -Roland Moriz -Ron Smits -root -Rovanion Luckey -Rudolph Gottesheim -Ryan Anderson -Ryan Aslett -Ryan Detzel -Ryan Fowler -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Reis -Sam Rijs -Sami Wagiaalla -Samuel Andaya -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Cronin -Sean P. Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -SeongJae Park -Seongyeol Lim -Sergey Alekseev -Sergey Evstifeev -Shane Canon -shaunol -Shawn Landden -Shawn Siefkas -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -shuai-z -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -Solomon Hykes -Song Gao -Soulou -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srini Brahmaroutu -Steeve Morin -Stefan Praszalowicz -Stephen Crosby -Stephen J Day -Steve Francia -Steve Koch -Steven Burgess -Steven Merrill -Steven Richards -Steven Taylor -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Ted M. Young -Tehmasp Chaudhri -Tejesh Mehta -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Hansen -Thomas LEVEIL -Thomas Orozco -Thomas Schroeter -Thomas Sjögren -Thomas Texier -Tianon Gravi -Tibor Vass -Tiffany Low -Tim Bosse -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Gesellchen -Tobias Schmidt -Tobias Schwab -Todd Lunter -Todd Whiteman -Tom Fotherby -Tom Hulihan -Tom Maaswinkel -Tomas Tomecek -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -Torstein Husebø -tpng -Travis Cline -Travis Thieman -Trent Ogren -Tristan Carel -Tyler Brock -Tzu-Jung Lee -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -vgeta -Victor Coisne -Victor Lyuboslavsky -Victor Marmol -Victor Vieux -Viktor Vojnovski -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Rutsky -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -xamyzhao -XiaoBing Jiang -Xinzi Zhou -Xiuming Chen -xuzhaokui -y00277921 -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -Yasunori Mahata -Yestin Sun -Yihang Ho -Yohei Ueda -Yongzhi Pan -Yuan Sun -Yurii Rashkovskii -Zac Dover -Zach Borboa -Zain Memon -Zaiste! 
-Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Wei -Zhang Wentao -Zilin Du -zimbatm -Zoltan Tombol -zqh -Álex González -Álvaro Lázaro -尹吉峰 diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md deleted file mode 100644 index e9de81dc..00000000 --- a/vendor/github.com/docker/docker/CHANGELOG.md +++ /dev/null @@ -1,1956 +0,0 @@ -# Changelog - -Items starting with `DEPRECATE` are important deprecation notices. For more -information on the list of deprecated flags and APIs please have a look at -https://docs.docker.com/misc/deprecated/ where target removal dates can also -be found. - -## 1.9.0 (2015-11-03) - -## Runtime - -+ `docker stats` now returns block IO metrics (#15005) -+ `docker stats` now details network stats per interface (#15786) -+ Add `ancestor=<image-name>` filter to `docker ps --filter` flag to filter -containers based on their ancestor images (#14570) -+ Add `label=<key>` filter to `docker ps --filter` to filter containers -based on label (#16530) -+ Add `--kernel-memory` flag to `docker run` (#14006) -+ Add `--message` flag to `docker import` allowing to specify an optional -message (#15711) -+ Add `--privileged` flag to `docker exec` (#14113) -+ Add `--stop-signal` flag to `docker run` allowing to replace the container -process stopping signal (#15307) -+ Add a new `unless-stopped` restart policy (#15348) -+ Inspecting an image now returns tags (#13185) -+ Add container size information to `docker inspect` (#15796) -+ Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json` (#17275) -- Remove the deprecated `/container/ps` endpoint from the API (#15972) -- Send and document correct HTTP codes for `/exec/<id>/start` (#16250) -- Share shm and mqueue between containers sharing IPC namespace (#15862) -- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) -- Ensure special network files (/etc/hosts etc.)
are read-only if bind-mounted -with `ro` option (#14965) -- Improve `rmi` performance (#16890) -- Do not update /etc/hosts for the default bridge network, except for links (#17325) -- Fix conflict with duplicate container names (#17389) -- Fix an issue with incorrect template execution in `docker inspect` (#17284) -- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) - -## Client - -+ Allow `docker import` to import from local files (#11907) - -## Builder - -+ Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different -stop-signal for the container process (#15307) -+ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` -that allows to add build-time environment variables (#15182) -- Improve cache miss performance (#16890) - -## Storage - -- devicemapper: Implement deferred deletion capability (#16381) - -## Networking - -+ `docker network` exits experimental and is part of standard release (#16645) -+ New network top-level concept, with associated subcommands and API (#16645) - WARNING: the API is different from the experimental API -+ Support for multiple isolated/micro-segmented networks (#16645) -+ Built-in multihost networking using VXLAN based overlay driver (#14071) -+ Support for third-party network plugins (#13424) -+ Ability to dynamically connect containers to multiple networks (#16645) -+ Support for user-defined IP address management via pluggable IPAM drivers (#16910) -+ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in nodes discovery (#16229) -+ Add `--cluster-store-opt` for setting up TLS settings (#16644) -+ Add `--dns-opt` to the daemon (#16031) -- DEPRECATE following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, - `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. - Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect - the networking settings of a container per network. - -## Volumes - -+ New top-level `volume` subcommand and API (#14242) -- Move API volume driver settings to host-specific config (#15798) -- Print an error message if volume name is not unique (#16009) -- Ensure volumes created from Dockerfiles always use the local volume driver -(#15507) -- DEPRECATE auto-creating missing host paths for bind mounts (#16349) - -## Logging - -+ Add `awslogs` logging driver for Amazon CloudWatch (#15495) -+ Add generic `tag` log option to allow customizing container/image -information passed to driver (e.g. show container names) (#15384) -- Implement the `docker logs` endpoint for the journald driver (#13707) -- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) 
(#15384) - -## Distribution - -+ `docker search` now works with partial names (#16509) -- Push optimization: avoid buffering to file (#15493) -- The daemon will display progress for images that were already being pulled -by another client (#15489) -- Only permissions required for the current action being performed are requested (#) -+ Renaming trust keys (and respective environment variables) from `offline` to -`root` and `tagging` to `repository` (#16894) -- DEPRECATE trust key environment variables -`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and -`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) - -## Security - -+ Add SELinux profiles to the rpm package (#15832) -- Fix various issues with AppArmor profiles provided in the deb package -(#14609) -- Add AppArmor policy that prevents writing to /proc (#15571) - -## 1.8.3 (2015-10-12) - -### Distribution - -- Fix layer IDs lead to local graph poisoning (CVE-2014-8178) -- Fix manifest validation and parsing logic errors allow pull-by-digest validation bypass (CVE-2014-8179) -+ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry - -## 1.8.2 (2015-09-10) - -### Distribution - -- Fixes rare edge case of handling GNU LongLink and LongName entries. -- Fix ^C on docker pull. -- Fix docker pull issues on client disconnection. -- Fix issue that caused the daemon to panic when loggers weren't configured properly. -- Fix goroutine leak pulling images from registry V2. - -### Runtime - -- Fix a bug mounting cgroups for docker daemons running inside docker containers. -- Initialize log configuration properly. - -### Client: - -- Handle `-q` flag in `docker ps` properly when there is a default format. - -### Networking - -- Fix several corner cases with netlink. - -### Contrib - -- Fix several issues with bash completion. 
- -## 1.8.1 (2015-08-12) - -### Distribution - -* Fix a bug where pushing multiple tags would result in invalid images - -## 1.8.0 (2015-08-11) - -### Distribution - -+ Trusted pull, push and build, disabled by default -* Make tar layers deterministic between registries -* Don't allow deleting the image of running containers -* Check if a tag name to load is a valid digest -* Allow one character repository names -* Add a more accurate error description for invalid tag name -* Make build cache ignore mtime - -### Cli - -+ Add support for DOCKER_CONFIG/--config to specify config file dir -+ Add --type flag for docker inspect command -+ Add formatting options to `docker ps` with `--format` -+ Replace `docker -d` with new subcommand `docker daemon` -* Zsh completion updates and improvements -* Add some missing events to bash completion -* Support daemon urls with base paths in `docker -H` -* Validate status= filter to docker ps -* Display when a container is in --net=host in docker ps -* Extend docker inspect to export image metadata related to graph driver -* Restore --default-gateway{,-v6} daemon options -* Add missing unpublished ports in docker ps -* Allow duration strings in `docker events` as --since/--until -* Expose more mounts information in `docker inspect` - -### Runtime - -+ Add new Fluentd logging driver -+ Allow `docker import` to load from local files -+ Add logging driver for GELF via UDP -+ Allow to copy files from host to containers with `docker cp` -+ Promote volume drivers from experimental to master -+ Add rollover options to json-file log driver, and --log-driver-opts flag -+ Add memory swappiness tuning options -* Remove cgroup read-only flag when privileged -* Make /proc, /sys, & /dev readonly for readonly containers -* Add cgroup bind mount by default -* Overlay: Export metadata for container and image in `docker inspect` -* Devicemapper: external device activation -* Devicemapper: Compare uuid of base device on startup -* Remove RC4 from the list of registry cipher suites -* Add syslog-facility option -* LXC execdriver compatibility with recent LXC versions -* Mark LXC execdriver as deprecated (to be removed with the migration to runc) - -### Plugins - -* Separate plugin sockets and specs locations -* Allow TLS connections to plugins - -### Bug fixes - -- Add missing 'Names' field to /containers/json API output -- Make `docker rmi` of dangling images safe while pulling -- Devicemapper: Change default basesize to 100G -- Go Scheduler issue with sync.Mutex and gcc -- Fix issue where Search API endpoint would panic due to empty AuthConfig -- Set image canonical names correctly -- Check dockerinit only if lxc driver is used -- Fix ulimit usage of nproc -- Always attach STDIN if -i,--interactive is specified -- Show error messages when saving container state fails -- Fixed incorrect assumption on --bridge=none treated as disable network -- Check for invalid port specifications in host configuration -- Fix endpoint leave failure for --net=host mode -- Fix goroutine leak in the stats API if the container is not running -- Check for apparmor file before reading it -- Fix DOCKER_TLS_VERIFY being ignored -- Set umask to the default on startup -- Correct the message of pause and unpause a non-running container -- Adjust disallowed CpuShares in container creation -- ZFS: correctly apply selinux context -- Display empty string instead of `<nil>` when IP opt is nil -- `docker kill` returns error when container is not running -- Fix COPY/ADD quoted/json form -- Fix goroutine leak on logs -f
with no output -- Remove panic in nat package on invalid hostport -- Fix container linking in Fedora 22 -- Fix error caused using default gateways outside of the allocated range -- Format times in inspect command with a template as RFC3339Nano -- Make registry client to accept 2xx and 3xx http status responses as successful -- Fix race issue that caused the daemon to crash with certain layer downloads failed in a specific order. -- Fix error when the docker ps format was not valid. -- Remove redundant ip forward check. -- Fix issue trying to push images to repository mirrors. -- Fix error cleaning up network entrypoints when there is an initialization issue. - -## 1.7.1 (2015-07-14) - -#### Runtime - -- Fix default user spawning exec process with `docker exec` -- Make `--bridge=none` not to configure the network bridge -- Publish networking stats properly -- Fix implicit devicemapper selection with static binaries -- Fix socket connections that hung intermittently -- Fix bridge interface creation on CentOS/RHEL 6.6 -- Fix local dns lookups added to resolv.conf -- Fix copy command mounting volumes -- Fix read/write privileges in volumes mounted with --volumes-from - -#### Remote API - -- Fix unmarshalling of Command and Entrypoint -- Set limit for minimum client version supported -- Validate port specification -- Return proper errors when attach/reattach fail - -#### Distribution - -- Fix pulling private images -- Fix fallback between registry V2 and V1 - -## 1.7.0 (2015-06-16) - -#### Runtime -+ Experimental feature: support for out-of-process volume plugins -* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag -* The `exec` command supports the `-u|--user` flag to specify the new process owner -+ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags -+ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` -+ Container block IO can be controlled in `docker run` using`--blkio-weight` -+ ZFS support -+ The `docker logs` command supports a `--since` argument -+ UTS namespace can be shared with the host with `docker run --uts=host` - -#### Quality -* Networking stack was entirely rewritten as part of the libnetwork effort -* Engine internals refactoring -* Volumes code was entirely rewritten to support the plugins effort -+ Sending SIGUSR1 to a daemon will dump all goroutines stacks without exiting - -#### Build -+ Support ${variable:-value} and ${variable:+value} syntax for environment variables -+ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` -+ git context changes with branches and directories -* The .dockerignore file support exclusion rules - -#### Distribution -+ Client support for v2 mirroring support for the official registry - -#### Bugfixes -* Firewalld is now supported and will automatically be used when available -* mounting --device recursively - -## 1.6.2 (2015-05-13) - -#### Runtime -- Revert change prohibiting mounting into /sys - -## 1.6.1 (2015-05-07) - -#### Security -- Fix read/write /proc paths (CVE-2015-3630) -- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) -- Fix opening of file-descriptor 1 (CVE-2015-3627) -- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) -- Prohibit mount of /sys - -#### Runtime -- Update AppArmor policy to not allow mounts - -## 1.6.0 (2015-04-07) - -#### Builder -+ 
Building images from an image ID -+ Build containers with resource constraints, i.e. `docker build --cpu-shares=100 --memory=1024m...` -+ `commit --change` to apply specified Dockerfile instructions while committing the image -+ `import --change` to apply specified Dockerfile instructions while importing the image -+ Builds no longer continue in the background when canceled with CTRL-C - -#### Client -+ Windows Support - -#### Runtime -+ Container and image Labels -+ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within -+ Logging drivers, `json-file`, `syslog`, or `none` -+ Pulling images by ID -+ `--ulimit` to set the ulimit on a container -+ `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) - -## 1.5.0 (2015-02-10) - -#### Builder -+ Dockerfile to use for a given `docker build` can be specified with the `-f` flag -* Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or COPY instructions cache -* ADD and COPY instructions accept relative paths -* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier -* Improve performance when exposing a large number of ports - -#### Hack -+ Allow client-side only integration tests for Windows -* Include docker-py integration tests against Docker daemon as part of our test suites - -#### Packaging -+ Support for the new version of the registry HTTP API -* Speed up `docker push` for images with a majority of already existing layers -- Fixed contacting a private registry through a proxy - -#### Remote API -+ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command -+ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command -* Container `inspect` endpoint show the ID of `exec` commands running in this container -* Container `inspect` endpoint show the number of times Docker auto-restarted the container -* New types of event can be streamed by the `events` endpoint: ‘OOM’ (container died with out of memory), ‘exec_create’, and ‘exec_start’ -- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes - -#### Runtime -+ Docker daemon has full IPv6 support -+ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools -+ The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted -+ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag -* Major stability improvements for devicemapper storage driver -* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted -* Better integration with host system: per-container iptable rules are moved to the DOCKER chain -- Fixed container exiting on out of memory to return an invalid exit code - -#### Other -* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon - -## 1.4.1 (2014-12-15) - -#### Runtime -- Fix issue with volumes-from and bind mounts not being
honored after create - -## 1.4.0 (2014-12-11) - -#### Notable Features since 1.3.0 -+ Set key=value labels to the daemon (displayed in `docker info`), applied with - new `-label` daemon flag -+ Add support for `ENV` in Dockerfile of the form: - `ENV name=value name2=value2...` -+ New Overlayfs Storage Driver -+ `docker info` now returns an `ID` and `Name` field -+ Filter events by event name, container, or image -+ `docker cp` now supports copying from container volumes -- Fixed `docker tag`, so it honors `--force` when overriding a tag for existing - image. - -## 1.3.3 (2014-12-11) - -#### Security -- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) -- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) -- Validate image IDs (CVE-2014-9358) - -#### Runtime -- Fix an issue when image archives are being read slowly - -#### Client -- Fix a regression related to stdin redirection -- Fix a regression with `docker cp` when destination is the current directory - -## 1.3.2 (2014-11-20) - -#### Security -- Fix tar breakout vulnerability -* Extractions are now sandboxed chroot -- Security options are no longer committed to images - -#### Runtime -- Fix deadlock in `docker ps -f exited=1` -- Fix a bug when `--volumes-from` references a container that failed to start - -#### Registry -+ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 -* Private registries whose IPs fall in the 127.0.0.0/8 range do no need the `--insecure-registry` flag -- Skip the experimental registry v2 API when mirroring is enabled - -## 1.3.1 (2014-10-28) - -#### Security -* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry -+ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified - -#### Runtime -- Fix issue where volumes would not be shared - -#### Client -- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` -- Fix docker run output to non-TTY stdout - -#### Builder -- Fix escaping `$` for environment variables -- Fix issue with lowercase `onbuild` Dockerfile instruction -- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` - -## 1.3.0 (2014-10-14) - -#### Notable features since 1.2.0 -+ Docker `exec` allows you to run additional processes inside existing containers -+ Docker `create` gives you the ability to create a container via the CLI without executing a process -+ `--security-opts` options to allow user to customize container labels and apparmor profiles -+ Docker `ps` filters -- Wildcard support to COPY/ADD -+ Move production URLs to get.docker.com from get.docker.io -+ Allocate IP address on the bridge inside a valid CIDR -+ Use drone.io for PR and CI testing -+ Ability to setup an official registry mirror -+ Ability to save multiple images with docker `save` - -## 1.2.0 (2014-08-20) - -#### Runtime -+ Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime -+ Auto-restart containers using policies -+ Use /var/lib/docker/tmp for large temporary files -+ `--cap-add` and `--cap-drop` to tweak what linux capability you want -+ `--device` to use devices in containers - -#### Client -+ `docker search` on private registries -+ Add `exited` filter to `docker ps --filter` -* `docker rm -f` now kills instead of stop -+ Support for IPv6 addresses in `--dns` flag - -#### Proxy -+ Proxy instances in separate processes -* Small bug fix 
on UDP proxy - -## 1.1.2 (2014-07-23) - -#### Runtime -+ Fix port allocation for existing containers -+ Fix containers restart on daemon restart - -#### Packaging -+ Fix /etc/init.d/docker issue on Debian - -## 1.1.1 (2014-07-09) - -#### Builder -* Fix issue with ADD - -## 1.1.0 (2014-07-03) - -#### Notable features since 1.0.1 -+ Add `.dockerignore` support -+ Pause containers during `docker commit` -+ Add `--tail` to `docker logs` - -#### Builder -+ Allow a tar file as context for `docker build` -* Fix issue with white-spaces and multi-lines in `Dockerfiles` - -#### Runtime -* Overall performance improvements -* Allow `/` as source of `docker run -v` -* Fix port allocation -* Fix bug in `docker save` -* Add links information to `docker inspect` - -#### Client -* Improve command line parsing for `docker commit` - -#### Remote API -* Improve status code for the `start` and `stop` endpoints - -## 1.0.1 (2014-06-19) - -#### Notable features since 1.0.0 -* Enhance security for the LXC driver - -#### Builder -* Fix `ONBUILD` instruction passed to grandchildren - -#### Runtime -* Fix events subscription -* Fix /etc/hostname file with host networking -* Allow `-h` and `--net=none` -* Fix issue with hotplug devices in `--privileged` - -#### Client -* Fix artifacts with events -* Fix a panic with empty flags -* Fix `docker cp` on Mac OS X - -#### Miscellaneous -* Fix compilation on Mac OS X -* Fix several races - -## 1.0.0 (2014-06-09) - -#### Notable features since 0.12.0 -* Production support - -## 0.12.0 (2014-06-05) - -#### Notable features since 0.11.0 -* 40+ various improvements to stability, performance and usability -* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file -* Inherit file permissions from the host on `ADD` -* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer -* The `images` command has a `-f`/`--filter` option to filter the list of images -* Add `--force-rm` to clean up after a failed build -* Standardize JSON keys in Remote API to CamelCase -* Pull from a docker run now assumes `latest` tag if not specified -* Enhance security on Linux capabilities and device nodes - -## 0.11.1 (2014-05-07) - -#### Registry -- Fix push and pull to private registry - -## 0.11.0 (2014-05-07) - -#### Notable features since 0.10.0 - -* SELinux support for mount and process labels -* Linked containers can be accessed by hostname -* Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces -* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon -* Logs can now be returned with an optional timestamp -* Docker now works with registries that support SHA-512 -* Multiple registry endpoints are supported to allow registry mirrors - -## 0.10.0 (2014-04-08) - -#### Builder -- Fix printing multiple messages on a single line. Fixes broken output during builds. -- Follow symlinks inside container's root for ADD build instructions. -- Fix EXPOSE caching. - -#### Documentation -- Add the new options of `docker ps` to the documentation. -- Add the options of `docker restart` to the documentation. -- Update daemon docs and help messages for --iptables and --ip-forward. -- Updated apt-cacher-ng docs example. -- Remove duplicate description of --mtu from docs. -- Add missing -t and -v for `docker images` to the docs. -- Add fixes to the cli docs. 
-- Update libcontainer docs. -- Update images in docs to remove references to AUFS and LXC. -- Update the nodejs_web_app in the docs to use the new epel RPM address. -- Fix external link on security of containers. -- Update remote API docs. -- Add image size to history docs. -- Be explicit about binding to all interfaces in redis example. -- Document DisableNetwork flag in the 1.10 remote api. -- Document that `--lxc-conf` is lxc only. -- Add chef usage documentation. -- Add example for an image with multiple for `docker load`. -- Explain what `docker run -a` does in the docs. - -#### Contrib -- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. -- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. -- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. -- Add check-config script to contrib. -- Fix fish shell completion. - -#### Hack -* Clean up "go test" output from "make test" to be much more readable/scannable. -* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. -+ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. -- Include contributed completions in Ubuntu PPA. -+ Add cli integration tests. -* Add tweaks to the hack scripts to make them simpler. - -#### Remote API -+ Add TLS auth support for API. -* Move git clone from daemon to client. -- Fix content-type detection in docker cp. -* Split API into 2 go packages. - -#### Runtime -* Support hairpin NAT without going through Docker server. -- devicemapper: succeed immediately when removing non-existing devices. -- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). -- devicemapper: increase timeout in waitClose to 10 seconds. -- devicemapper: ensure we shut down thin pool cleanly. -- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. -- devicemapper: avoid AB-BA deadlock. -- devicemapper: make shutdown better/faster. -- improve alpha sorting in mflag. -- Remove manual http cookie management because the cookiejar is being used. -- Use BSD raw mode on Darwin. Fixes nano, tmux and others. -- Add FreeBSD support for the client. -- Merge auth package into registry. -- Add deprecation warning for -t on `docker pull`. -- Remove goroutine leak on error. -- Update parseLxcInfo to comply with new lxc1.0 format. -- Fix attach exit on darwin. -- Improve deprecation message. -- Retry to retrieve the layer metadata up to 5 times for `docker pull`. -- Only unshare the mount namespace for execin. -- Merge existing config when committing. -- Disable daemon startup timeout. -- Fix issue #4681: add loopback interface when networking is disabled. -- Add failing test case for issue #4681. -- Send SIGTERM to child, instead of SIGKILL. -- Show the driver and the kernel version in `docker info` even when not in debug mode. -- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. -- Fix issue caused by the absence of /etc/apparmor.d. -- Don't leave empty cidFile behind when failing to create the container. -- Mount cgroups automatically if they're not mounted already. -- Use mock for search tests. -- Update to double-dash everywhere. -- Move .dockerenv parsing to lxc driver. -- Move all bind-mounts in the container inside the namespace. 
-- Don't use separate bind mount for container. -- Always symlink /dev/ptmx for libcontainer. -- Don't kill by pid for other drivers. -- Add initial logging to libcontainer. -* Sort by port in `docker ps`. -- Move networking drivers into runtime top level package. -+ Add --no-prune to `docker rmi`. -+ Add time since exit in `docker ps`. -- graphdriver: add build tags. -- Prevent allocation of previously allocated ports & prevent improve port allocation. -* Add support for --since/--before in `docker ps`. -- Clean up container stop. -+ Add support for configurable dns search domains. -- Add support for relative WORKDIR instructions. -- Add --output flag for docker save. -- Remove duplication of DNS entries in config merging. -- Add cpuset.cpus to cgroups and native driver options. -- Remove docker-ci. -- Promote btrfs. btrfs is no longer considered experimental. -- Add --input flag to `docker load`. -- Return error when existing bridge doesn't match IP address. -- Strip comments before parsing line continuations to avoid interpreting instructions as comments. -- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. -- Add systemd implementation of cgroups and make containers show up as systemd units. -- Fix commit and import when no repository is specified. -- Remount /var/lib/docker as --private to fix scaling issue. -- Use the environment's proxy when pinging the remote registry. -- Reduce error level from harmless errors. -* Allow --volumes-from to be individual files. -- Fix expanding buffer in StdCopy. -- Set error regardless of attach or stdin. This fixes #3364. -- Add support for --env-file to load environment variables from files. -- Symlink /etc/mtab and /proc/mounts. -- Allow pushing a single tag. -- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. -- Don't throw error when starting an already running container. -- Fix dynamic port allocation limit. -- remove setupDev from libcontainer. -- Add API version to `docker version`. -- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. -- Fix --volumes-from mount failure. -- Allow non-privileged containers to create device nodes. -- Skip login tests because of external dependency on a hosted service. -- Deprecate `docker images --tree` and `docker images --viz`. -- Deprecate `docker insert`. -- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. -- Add specific error message when hitting 401 over HTTP on push. -- Fix absolute volume check. -- Remove volumes-from from the config. -- Move DNS options to hostconfig. -- Update the apparmor profile for libcontainer. -- Add deprecation notice for `docker commit -run`. - -## 0.9.1 (2014-03-24) - -#### Builder -- Fix printing multiple messages on a single line. Fixes broken output during builds. - -#### Documentation -- Fix external link on security of containers. - -#### Contrib -- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. -- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. - -#### Hack -- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. - -#### Remote API -- Fix content-type detection in `docker cp`. - -#### Runtime -- Use BSD raw mode on Darwin. Fixes nano, tmux and others. 
-- Only unshare the mount namespace for execin. -- Retry to retrieve the layer metadata up to 5 times for `docker pull`. -- Merge existing config when committing. -- Fix panic in monitor. -- Disable daemon startup timeout. -- Fix issue #4681: add loopback interface when networking is disabled. -- Add failing test case for issue #4681. -- Send SIGTERM to child, instead of SIGKILL. -- Show the driver and the kernel version in `docker info` even when not in debug mode. -- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. -- Fix issue caused by the absence of /etc/apparmor.d. -- Don't leave empty cidFile behind when failing to create the container. -- Improve deprecation message. -- Fix attach exit on darwin. -- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). -- devicemapper: succeed immediately when removing non-existing devices. -- devicemapper: increase timeout in waitClose to 10 seconds. -- Remove goroutine leak on error. -- Update parseLxcInfo to comply with new lxc1.0 format. - -## 0.9.0 (2014-03-10) - -#### Builder -- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. -- Add error to docker build --rm. This adds missing error handling. -- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. -- Make `--rm` the default for `docker build`. - -#### Documentation -- Download the docker client binary for Mac over https. -- Update the titles of the install instructions & descriptions. -* Add instructions for upgrading boot2docker. -* Add port forwarding example in OS X install docs. -- Attempt to disentangle repository and registry. -- Update docs to explain more about `docker ps`. -- Update sshd example to use a Dockerfile. -- Rework some examples, including the Python examples. -- Update docs to include instructions for a container's lifecycle. -- Update docs documentation to discuss the docs branch. -- Don't skip cert check for an example & use HTTPS. -- Bring back the memory and swap accounting section which was lost when the kernel page was removed. -- Explain DNS warnings and how to fix them on systems running and using a local nameserver. - -#### Contrib -- Add Tanglu support for mkimage-debootstrap. -- Add SteamOS support for mkimage-debootstrap. - -#### Hack -- Get package coverage when running integration tests. -- Remove the Vagrantfile. This is being replaced with boot2docker. -- Fix tests on systems where aufs isn't available. -- Update packaging instructions and remove the dependency on lxc. - -#### Remote API -* Move code specific to the API to the api package. -- Fix header content type for the API. Makes all endpoints use proper content type. -- Fix registry auth & remove ping calls from CmdPush and CmdPull. -- Add newlines to the JSON stream functions. - -#### Runtime -* Do not ping the registry from the CLI. All requests to registries flow through the daemon. -- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. -- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. -- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. -* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. -- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. -- Ensure docker cp stream is closed properly. 
Fixes problems with files not being copied by `docker cp`. -- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. -- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. -- Fix custom bridge related options. This makes custom bridges work again. -+ Mount-bind the PTY as container console. This allows tmux/screen to run. -+ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. -+ Add native exec driver which uses libcontainer and make it the default exec driver. -- Add support for handling extended attributes in archives. -* Set the container MTU to be the same as the host MTU. -+ Add simple sha256 checksums for layers to speed up `docker push`. -* Improve kernel version parsing. -* Allow flag grouping (`docker run -it`). -- Remove chroot exec driver. -- Fix divide by zero to fix panic. -- Rewrite `docker rmi`. -- Fix docker info with lxc 1.0.0. -- Fix fedora tty with apparmor. -* Don't always append env vars, replace defaults with vars from config. -* Fix a goroutine leak. -* Switch to Go 1.2.1. -- Fix unique constraint error checks. -* Handle symlinks for Docker's data directory and for TMPDIR. -- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) -- Add apparmor profile for the native execution driver. -* Move system specific code from archive to pkg/system. -- Fix duplicate signal for `docker run -i -t` (issue #3336). -- Return correct process pid for lxc. -- Add a -G option to specify the group which unix sockets belong to. -+ Add `-f` flag to `docker rm` to force removal of running containers. -+ Kill ghost containers and restart all ghost containers when the docker daemon restarts. -+ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. - -## 0.8.1 (2014-02-18) - -#### Builder - -- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper -- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system -- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported - -#### Documentation - -* Update issue filing instructions -* Warn against the use of symlinks for Docker's storage folder -* Replace the Firefox example with an IceWeasel example -* Rewrite the PostgresSQL example using a Dockerfile and add more details to it -* Improve the OS X documentation - -#### Remote API - -- Fix broken images API for version less than 1.7 -- Use the right encoding for all API endpoints which return JSON -- Move remote api client to api/ -- Queue calls to the API using generic socket wait - -#### Runtime - -- Fix the use of custom settings for bridges and custom bridges -- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures -- Remove two panics which could make Docker crash in some situations -- Don't ping registry from the CLI client -- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks -- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration -- Remove directory when removing devicemapper device. This cleans up leftover mount directories -- Drop NET_ADMIN capability for non-privileged containers. 
Unprivileged containers can't change their network configuration -- Ensure `docker cp` stream is closed properly -- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper -- Stop allowing tcp:// as a default tcp bin address which binds to 127.0.0.1:4243 and remove the default port -+ Mount-bind the PTY as container console. This allows tmux and screen to run in a container -- Clean up archive closing. This fixes and improves archive handling -- Fix engine tests on systems where temp directories are symlinked -- Add test methods for save and load -- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart -- Support submodules when building from a GitHub repository -- Quote volume path to allow spaces -- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs - -## 0.8.0 (2014-02-04) - -#### Notable features since 0.7.0 - -* Images and containers can be removed much faster -* Building an image from source with docker build is now much faster -* The Docker daemon starts and stops much faster -* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations -* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations -* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar -* Docker can now create, remove and modify larger numbers of containers and images graciously thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers -With the ongoing changes to the networking and execution subsystems of docker testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages -* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change - -* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed -* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build -* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write -* Docker is officially supported on Mac OS X -* The Docker daemon supports systemd socket activation - -## 0.7.6 (2014-01-14) - -#### Builder - -* Do not follow symlink outside of build context - -#### Runtime - -- Remount bind mounts when ro is specified -* Use https for fetching docker version - -#### Other - -* Inline the test.docker.io fingerprint -* Add ca-certificates to packaging documentation - -## 0.7.5 (2014-01-09) - -#### Builder - -* Disable compression for build. 
More space usage but a much faster upload -- Fix ADD caching for certain paths -- Do not compress archive from git build - -#### Documentation - -- Fix error in GROUP add example -* Make sure the GPG fingerprint is inline in the documentation -* Give more specific advice on setting up signing of commits for DCO - -#### Runtime - -- Fix misspelled container names -- Do not add hostname when networking is disabled -* Return most recent image from the cache by date -- Return all errors from docker wait -* Add Content-Type Header "application/json" to GET /version and /info responses - -#### Other - -* Update DCO to version 1.1 -+ Update Makefile to use "docker:GIT_BRANCH" as the generated image name -* Update Travis to check for new 1.1 DCO version - -## 0.7.4 (2014-01-07) - -#### Builder - -- Fix ADD caching issue with . prefixed path -- Fix docker build on devicemapper by reverting sparse file tar option -- Fix issue with file caching and prevent wrong cache hit -* Use same error handling while unmarshalling CMD and ENTRYPOINT - -#### Documentation - -* Simplify and streamline Amazon Quickstart -* Install instructions use unprefixed Fedora image -* Update instructions for mtu flag for Docker on GCE -+ Add Ubuntu Saucy to installation -- Fix for wrong version warning on master instead of latest - -#### Runtime - -- Only get the image's rootfs when we need to calculate the image size -- Correctly handle unmapping UDP ports -* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build -- Fix login message to say pull instead of push -- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN -* Make blank -H option default to the same as no -H was sent -* Extract cgroups utilities to own submodule - -#### Other - -+ Add Travis CI configuration to validate DCO and gofmt requirements -+ Add Developer Certificate of Origin Text -* Upgrade VBox Guest Additions -* Check standalone header when pinging a registry server - -## 0.7.3 (2014-01-02) - -#### Builder - -+ Update ADD to use the image cache, based on a hash of the added content -* Add error message for empty Dockerfile - -#### Documentation - -- Fix outdated link to the "Introduction" on www.docker.io -+ Update the docs to get wider when the screen does -- Add information about needing to install LXC when using raw binaries -* Update Fedora documentation to disentangle the docker and docker.io conflict -* Add a note about using the new `-mtu` flag in several GCE zones -+ Add FrugalWare installation instructions -+ Add a more complete example of `docker run` -- Fix API documentation for creating and starting Privileged containers -- Add missing "name" parameter documentation on "/containers/create" -* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration -- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 - -#### Hack - -- Add missing libdevmapper dependency to the packagers documentation -* Update minimum Go requirement to a hard line at Go 1.2+ -* Many minor improvements to the Vagrantfile -+ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) -+ Add coverprofile generation reporting -- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually -* Update Dockerfile to be more canonical and have less spurious warnings during build -- Fix some miscellaneous `docker pull` progress bar 
display issues -* Migrate more miscellaneous packages under the "pkg" folder -* Update TextMate highlighting to automatically be enabled for files named "Dockerfile" -* Reorganize syntax highlighting files under a common "contrib/syntax" directory -* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation -* Add support for container names in bash completion - -#### Packaging - -+ Add an official Docker client binary for Darwin (Mac OS X) -* Remove empty "Vendor" string and add "License" on deb package -+ Add a stubbed version of "/etc/default/docker" in the deb package - -#### Runtime - -* Update layer application to extract tars in place, avoiding file churn while handling whiteouts -- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) -* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) -+ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions -- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files -* Update container name validation to include '.' -- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected -* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler -* Update to use proper box-drawing characters everywhere in `docker images -tree` -* Move MTU setting from LXC configuration to directly use netlink -* Add `-S` option to external tar invocation for more efficient sparse file handling -+ Add arch/os info to User-Agent string, especially for registry requests -+ Add `-mtu` option to Docker daemon for configuring MTU -- Fix `docker build` to exit with a non-zero exit code on error -+ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation - -## 0.7.2 (2013-12-16) - -#### Runtime - -+ Validate container names on creation with standard regex -* Increase maximum image depth to 127 from 42 -* Continue to move api endpoints to the job api -+ Add -bip flag to allow specification of dynamic bridge IP via CIDR -- Allow bridge creation when ipv6 is not enabled on certain systems -* Set hostname and IP address from within dockerinit -* Drop capabilities from within dockerinit -- Fix volumes on host when symlink is present in the image -- Prevent deletion of image if ANY container is depending on it even if the container is not running -* Update docker push to use new progress display -* Use os.Lstat to allow mounting unix sockets when inspecting volumes -- Adjust handling of inactive user login -- Add missing defines in devicemapper for older kernels -- Allow untag operations with no container validation -- Add auth config to docker build - -#### Documentation - -* Add more information about Docker logging -+ Add RHEL documentation -* Add a direct example for changing the CMD that is run in a container -* Update Arch installation documentation -+ Add section on Trusted Builds -+ Add Network documentation page - -#### Other - -+ Add new cover bundle for providing code coverage reporting -* Separate integration tests in bundles -* Make Tianon the hack maintainer -* Update mkimage-debootstrap with more tweaks for keeping
images small -* Use https to get the install script -* Remove vendored dotcloud/tar now that Go 1.2 has been released - -## 0.7.1 (2013-12-05) - -#### Documentation - -+ Add @SvenDowideit as documentation maintainer -+ Add links example -+ Add documentation regarding ambassador pattern -+ Add Google Cloud Platform docs -+ Add Dockerfile best practices -* Update doc for RHEL -* Update doc for registry -* Update Postgres examples -* Update doc for Ubuntu install -* Improve remote api doc - -#### Runtime - -+ Add hostconfig to docker inspect -+ Implement `docker logs -f` to stream logs -+ Add env variable to disable kernel version warning -+ Add -format to `docker inspect` -+ Support bind-mount for files -- Fix bridge creation on RHEL -- Fix image size calculation -- Make sure iptables are called even if the bridge already exists -- Fix issue with stderr only attach -- Remove init layer when destroying a container -- Fix same port binding on different interfaces -- `docker build` now returns the correct exit code -- Fix `docker port` to display correct port -- `docker build` now checks that the dockerfile exists client side -- `docker attach` now returns the correct exit code -- Remove the name entry when the container does not exist - -#### Registry - -* Improve progress bars, add ETA for downloads -* Simultaneous pulls now wait for the first to finish instead of failing -- Tag only the top-layer image when pushing to registry -- Fix issue with offline image transfer -- Fix issue preventing using ':' in password for registry - -#### Other - -+ Add pprof handler for debug -+ Create a Makefile -* Use stdlib tar that now includes fix -* Improve make.sh test script -* Handle SIGQUIT on the daemon -* Disable verbose during tests -* Upgrade to go1.2 for official build -* Improve unit tests -* The test suite now runs all tests even if one fails -* Refactor C in Go (Devmapper) -- Fix OS X compilation - -## 0.7.0 (2013-11-25) - -#### Notable features since 0.6.0 - -* Storage drivers: choose from aufs, device-mapper, or vfs. -* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. -* Links: compose complex software stacks by connecting containers to each other. -* Container naming: organize your containers by giving them memorable names. -* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. -* Offline transfer: push and pull images to the filesystem without losing information. -* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. - -## 0.6.7 (2013-11-21) - -#### Runtime - -* Improve stability, fix some race conditions -* Skip the volumes mounted when deleting the volumes of a container.
-* Fix layer size computation: handle hard links correctly -* Use the working path for docker cp CONTAINER:PATH -* Fix tmp dir never being cleaned up -* Speed up docker ps -* More informative error message on name collisions -* Fix nameserver regex -* Always return long ids -* Fix container restart race condition -* Keep published ports on docker stop; docker start -* Fix container networking on Fedora -* Correctly express "any address" to iptables -* Fix network setup when reconnecting to ghost container -* Prevent deletion if image is used by a running container -* Lock around read operations in graph - -#### RemoteAPI - -* Return full ID on docker rmi - -#### Client - -+ Add -tree option to images -+ Offline image transfer -* Exit with status 2 on usage error and display usage on stderr -* Do not forward SIGCHLD to container -* Use string timestamp for docker events -since - -#### Other - -* Update to go 1.2rc5 -+ Add /etc/default/docker support to upstart - -## 0.6.6 (2013-11-06) - -#### Runtime - -* Ensure container name on register -* Fix regression in /etc/hosts -+ Add lock around write operations in graph -* Check if port is valid -* Fix restart runtime error with ghost container networking -+ Add some more colors and animals to increase the pool of generated names -* Fix issues in docker inspect -+ Escape apparmor confinement -+ Set environment variables using a file. -* Prevent docker insert from erasing anything -+ Prevent DNS server conflicts in CreateBridgeIface -+ Validate bind mounts on the server side -+ Use parent image config in docker build - -#### Client - -+ Add -P flag to publish all exposed ports -+ Add -notrunc and -q flags to docker history -* Fix docker commit, tag and import usage -+ Add stars, trusted builds and library flags in docker search -* Fix docker logs with tty - -#### RemoteAPI - -* Make /events API send headers immediately -* Do not split the last column of docker top -+ Add size to history - -#### Other - -+ Contrib: Desktop integration. Firefox use case.
-+ Dockerfile: bump to go1.2rc3 - -## 0.6.5 (2013-10-29) - -#### Runtime - -+ Containers can now be named -+ Containers can now be linked together for service discovery -+ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors -+ Automatically start crashed containers after a reboot -+ Expose IP, port, and proto as separate environment vars for container links -* Allow ports to be published to specific IPs -* Prohibit inter-container communication by default -- Ignore ErrClosedPipe for stdin in Container.Attach -- Remove unused field kernelVersion -* Fix issue when mounting subdirectories of /mnt in container -- Fix untag during removal of images -* Check return value of syscall.Chdir when changing working directory inside dockerinit - -#### Client - -- Only pass stdin to hijack when needed to avoid closed pipe errors -* Use less reflection in command-line method invocation -- Monitor the tty size after starting the container, not prior -- Remove useless os.Exit() calls after log.Fatal - -#### Hack - -+ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian -* Add -p option to invoke debootstrap with http_proxy -- Update install.sh with $sh_c to get sudo/su for modprobe -* Update all the mkimage scripts to use --numeric-owner as a tar argument -* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues - -#### Other - -* Documentation: Fix the flags for nc in example -* Testing: Remove warnings and prevent mount issues -- Testing: Change logic for tty resize to avoid warning in tests -- Builder: Fix race condition in docker build with verbose output -- Registry: Fix content-type for PushImageJSONIndex method -* Contrib: Improve helper tools to generate Debian and Arch Linux server images - -## 0.6.4 (2013-10-16) - -#### Runtime - -- Add cleanup of container when Start() fails -* Add better comments to utils/stdcopy.go -* Add utils.Errorf for error logging -+ Add -rm to docker run for removing a container on exit -- Remove error messages which are not actually errors -- Fix `docker rm` with volumes -- Fix some error cases where an HTTP body might not be closed -- Fix panic with wrong dockercfg file -- Fix the attach behavior with -i -* Record termination time in state.
-- Use empty string so TempDir uses the OS's temp dir automatically -- Make sure to close the network allocators -+ Autorestart containers by default -* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` -* lxc: Allow set_file_cap capability in container -- Move run -rm to the cli only -* Split stdout and stderr -* Always create a new session for the container - -#### Testing - -- Add aggregated docker-ci email report -- Add cleanup to remove leftover containers -* Add nightly release to docker-ci -* Add more tests around auth.ResolveAuthConfig -- Remove a few errors in tests -- Catch errClosing error when TCP and UDP proxies are terminated -* Only run certain tests with TESTFLAGS='-run TestName' make.sh -* Prevent docker-ci from testing closing PRs -* Replace panic with log.Fatal in tests -- Increase TestRunDetach timeout - -#### Documentation - -* Add initial draft of the Docker infrastructure doc -* Add devenvironment link to CONTRIBUTING.md -* Add `apt-get install curl` to Ubuntu docs -* Add explanation for export restrictions -* Add .dockercfg doc -* Remove Gentoo install notes about #1422 workaround -* Fix help text for -v option -* Fix Ping endpoint documentation -- Fix parameter names in docs for ADD command -- Fix ironic typo in changelog -* Various command fixes in postgres example -* Document how to edit and release docs -- Minor updates to `postgresql_service.rst` -* Clarify LGTM process to contributors -- Corrected error in the package name -* Document what `vagrant up` is actually doing -+ Improve doc search results -* Cleanup whitespace in API 1.5 docs -* Use angle brackets in MAINTAINER example email -* Update archlinux.rst -+ Changes to a new style for the docs. Includes version switcher. -* Formatting, add information about multiline json -* Improve registry and index REST API documentation -- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 -* Update Gentoo installation documentation now that we're in the portage tree proper -* Cleanup and reorganize docs and tooling for contributors and maintainers -- Minor spelling correction of protocoll -> protocol - -#### Contrib - -* Add vim syntax highlighting for Dockerfiles from @honza -* Add mkimage-arch.sh -* Reorganize contributed completion scripts to add zsh completion - -#### Hack - -* Add vagrant user to the docker group -* Add proper bash completion for "docker push" -* Add xz utils as a runtime dep -* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates -+ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link -* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly -+ Add @tianon to hack/MAINTAINERS -* Improve network performance for VirtualBox -* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) -- Fix contrib/mkimage-debian.sh apt caching prevention -+ Add Dockerfile.tmLanguage to contrib -* Configured FPM to make /etc/init/docker.conf a config file -* Enable SSH Agent forwarding in Vagrant VM -* Several small tweaks/fixes for contrib/mkimage-debian.sh - -#### Other - -- Builder: Abort build if mergeConfig returns an error and fix duplicate error message -- Packaging: Remove deprecated packaging directory -- Registry: Use correct auth config when logging in.
-- Registry: Fix the error message so it is the same as the regex - -## 0.6.3 (2013-09-23) - -#### Packaging - -* Add 'docker' group on install for ubuntu package -* Update tar vendor dependency -* Download apt key over HTTPS - -#### Runtime - -- Only copy and change permissions on non-bindmount volumes -* Allow multiple volumes-from -- Fix HTTP imports from STDIN - -#### Documentation - -* Update section on extracting the docker binary after build -* Update development environment docs for new build process -* Remove 'base' image from documentation - -#### Other - -- Client: Fix detach issue -- Registry: Update regular expression to match index - -## 0.6.2 (2013-09-17) - -#### Runtime - -+ Add domainname support -+ Implement image filtering with path.Match -* Remove unnecessary warnings -* Remove os/user dependency -* Only mount the hostname file when the config exists -* Handle signals within the `docker login` command -- UID and GID are now also applied to volumes -- `docker start` sets error code upon error -- `docker run` sets the same error code as the process started - -#### Builder - -+ Add -rm option in order to remove intermediate containers -* Allow multiline for the RUN instruction - -#### Registry - -* Implement login with private registry -- Fix push issues - -#### Other - -+ Hack: Vendor all dependencies -* Remote API: Bump to v1.5 -* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. -* Documentation: General improvements - -## 0.6.1 (2013-08-23) - -#### Registry - -* Pass "meta" headers in API calls to the registry - -#### Packaging - -- Use correct upstart script with new build tool -- Use libffi-dev, don't build it from sources -- Remove duplicate mercurial install command - -## 0.6.0 (2013-08-22) - -#### Runtime - -+ Add lxc-conf flag to allow custom lxc options -+ Add an option to set the working directory -* Add Image name to LogEvent tests -+ Add -privileged flag and relevant tests, docs, and examples -* Add websocket support to /containers/<id>/attach/ws -* Add warning when net.ipv4.ip_forwarding = 0 -* Add hostname to environment -* Add last stable version in `docker version` -- Fix race conditions in parallel pull -- Fix Graph ByParent() to generate list of child images per parent image. -- Fix typo: fmt.Sprint -> fmt.Sprintf -- Fix small \n error in docker build -* Fix to "Inject dockerinit at /.dockerinit" -* Fix #910: print user name to docker info output -* Use Go 1.1.2 for dockerbuilder -* Use ranged for loop on channels -- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete -- Improve CMD, ENTRYPOINT, and attach docs. -- Improve connect message with socket error -- Load authConfig only when needed and fix useless WARNING -- Show tag used when image is missing -* Apply volumes-from before creating volumes -- Make docker run handle SIGINT/SIGTERM -- Prevent crash when .dockercfg not readable -- Install script should be fetched over https, not http. -* API, issue 1471: Use groups for socket permissions -- Correctly detect IPv4 forwarding -* Mount /dev/shm as a tmpfs -- Switch from http to https for get.docker.io -* Let userland proxy handle container-bound traffic -* Update the Docker CLI to specify a value for the "Host" header.
-- Change network range to avoid conflict with EC2 DNS -- Reduce connect and read timeout when pinging the registry -* Parallel pull -- Handle ip route showing mask-less IP addresses -* Allow ENTRYPOINT without CMD -- Always consider localhost as a domain name when parsing the FQN repos name -* Refactor checksum - -#### Documentation - -* Add MongoDB image example -* Add instructions for creating and using the docker group -* Add sudo to examples and installation to documentation -* Add ufw doc -* Add a reference to ps -a -* Add information about Docker's high level tools over LXC. -* Fix typo in docs for docker run -dns -* Fix a typo in the ubuntu installation guide -* Fix to docs regarding adding docker groups -* Update default -H docs -* Update readme with dependencies for building -* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 -* PostgreSQL service example in documentation -* Suggest installing linux-headers by default. -* Change the twitter handle -* Clarify Amazon EC2 installation -* 'Base' image is deprecated and should no longer be referenced in the docs. -* Move note about officially supported kernel -- Solved the logo being squished in Safari - -#### Builder - -+ Add USER instruction to Dockerfile -+ Add workdir support for the Buildfile -* Add no cache for docker build -- Fix docker build and docker events output -- Only count known instructions as build steps -- Make sure ENV instruction within build performs a commit each time -- Forbid certain paths within docker build ADD -- Repository name (and optionally a tag) in build usage -- Make sure ADD will create everything in 0755 - -#### Remote API - -* Sort Images by most recent creation date. -* Reworking opaque requests in registry module -* Add image name in /events -* Use mime pkg to parse Content-Type -* 650 http utils and user agent field - -#### Hack - -+ Bash Completion: Limit commands to containers of a relevant state -* Add docker dependencies coverage testing into docker-ci - -#### Packaging - -+ Docker-brew 0.5.2 support and memory footprint reduction -* Add new docker dependencies into docker-ci -- Revert "docker.upstart: avoid spawning a `sh` process" -+ Docker-brew and Docker standard library -+ Release docker with docker -* Fix the upstart script generated by get.docker.io -* Enabled the docs to generate manpages. -* Revert Bind daemon to 0.0.0.0 in Vagrant.
- -#### Registry - -* Improve auth push -* Registry unit tests + mock registry - -#### Tests - -* Improve TestKillDifferentUser to prevent timeout on buildbot -- Fix typo in TestBindMounts (runContainer called without image) -* Improve TestGetContainersTop so it does not rely on sleep -* Relax the lo interface test to allow iface index != 1 -* Add registry functional test to docker-ci -* Add some tests in server and utils - -#### Other - -* Contrib: bash completion script -* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host -* Don't read from stdout when only attached to stdin - -## 0.5.3 (2013-08-13) - -#### Runtime - -* Use docker group for socket permissions -- Spawn shell within upstart script -- Handle ip route showing mask-less IP addresses -- Add hostname to environment - -#### Builder - -- Make sure ENV instruction within build performs a commit each time - -## 0.5.2 (2013-08-08) - -* Builder: Forbid certain paths within docker build ADD -- Runtime: Change network range to avoid conflict with EC2 DNS -* API: Change daemon to listen on unix socket by default - -## 0.5.1 (2013-07-30) - -#### Runtime - -+ Add `ps` args to `docker top` -+ Add support for container ID files (pidfile-like) -+ Add container=lxc in default env -+ Support networkless containers with `docker run -n` and `docker -d -b=none` -* Stdout/stderr logs are now stored in the same file as JSON -* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. -* Change .dockercfg format to json and support multiple auth remote -- Do not override volumes from config -- Fix issue with EXPOSE override - -#### API - -+ Docker client now sets useragent (RFC 2616) -+ Add /events endpoint - -#### Builder - -+ ADD command now understands URLs -+ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables -- Create directories with 755 instead of 700 within ADD instruction - -#### Hack - -* Simplify unit tests with helpers -* Improve docker.upstart event -* Add coverage testing into docker-ci - -## 0.5.0 (2013-07-17) - -#### Runtime - -+ List all processes running inside a container with 'docker top' -+ Host directories can be mounted as volumes with 'docker run -v' -+ Containers can expose public UDP ports (eg, '-p 123/udp') -+ Optionally specify an exact public port (eg. '-p 80:4500') -* 'docker login' supports additional options -- Don't save a container's hostname when committing an image. - -#### Registry - -+ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries -- Fix issues when uploading images to a private registry - -#### Builder - -+ ENTRYPOINT instruction sets a default binary entry point to a container -+ VOLUME instruction marks a part of the container as persistent data -* 'docker build' displays the full output of a build by default - -## 0.4.8 (2013-07-01) - -+ Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
-- Tests: Fix issues in the test suite - -## 0.4.7 (2013-06-28) - -#### Remote API - -* The progress bar updates faster when downloading and uploading large files -- Fix a bug in the optional unix socket transport - -#### Runtime - -* Improve detection of kernel version -+ Host directories can be mounted as volumes with 'docker run -b' -- Fix an issue when only attaching to stdin -* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts - -#### Hack - -* Improve test suite and dev environment -* Remove dependency of unit tests on 'os/user' - -#### Other - -* Registry: easier push/pull to a custom registry -+ Documentation: add terminology section - -## 0.4.6 (2013-06-22) - -- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. - -## 0.4.5 (2013-06-21) - -+ Builder: 'docker build git://URL' fetches and builds a remote git repository -* Runtime: 'docker ps -s' optionally prints container size -* Tests: improved and simplified -- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. -- Builder: fix a regression when using ADD with a single regular file. - -## 0.4.4 (2013-06-19) - -- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. - -## 0.4.3 (2013-06-19) - -#### Builder - -+ ADD of a local file will detect tar archives and unpack them -* ADD improvements: use tar for copy + automatically unpack local archives -* ADD uses tar/untar for copies instead of calling 'cp -ar' -* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. -- Fix a bug which caused builds to fail if ADD was the first command -* Nicer output for 'docker build' - -#### Runtime - -* Remove bsdtar dependency -* Add unix socket and multiple -H support -* Prevent rm of running containers -* Use go1.1 cookiejar -- Fix issue detaching from running TTY container -- Forbid parallel push/pull for a single image/repo. Fixes #311 -- Fix race condition within Run command when attaching. - -#### Client - -* HumanReadable ProgressBar sizes in pull -* Fix docker version's git commit output - -#### API - -* Send all tags on History API call -* Add tag lookup to history command.
Fixes #882 - -#### Documentation - -- Fix missing command in irc bouncer example - -## 0.4.2 (2013-06-17) - -- Packaging: Bumped version to work around an Ubuntu bug - -## 0.4.1 (2013-06-17) - -#### Remote API - -+ Add flag to enable cross domain requests -+ Add image and container sizes in docker ps and docker images - -#### Runtime - -+ Configure dns configuration host-wide with 'docker -d -dns' -+ Detect faulty DNS configuration and replace it with a public default -+ Allow docker run <name>:<id> -+ You can now specify public port (ex: -p 80:4500) -* Improve image removal to garbage-collect unreferenced parents - -#### Client - -* Allow multiple params in inspect -* Print the container id before the hijack in `docker run` - -#### Registry - -* Add regexp check on repo's name -* Move auth to the client -- Remove login check on pull - -#### Other - -* Vagrantfile: Add the rest api port to vagrantfile's port_forward -* Upgrade to Go 1.1 -- Builder: don't ignore last line in Dockerfile when it doesn't end with \n - -## 0.4.0 (2013-06-03) - -#### Builder - -+ Introducing Builder -+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile - -#### Remote API - -+ Introducing Remote API -+ Control Docker programmatically using a simple HTTP/json API - -#### Runtime - -* Various reliability and usability improvements - -## 0.3.4 (2013-05-30) - -#### Builder - -+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile -+ 'docker build -t FOO' applies the tag FOO to the newly built container. - -#### Runtime - -+ Interactive TTYs correctly handle window resize -* Fix how configuration is merged between layers - -#### Remote API - -+ Split stdout and stderr on 'docker run' -+ Optionally listen on a different IP and port (use at your own risk) - -#### Documentation - -* Improve install instructions. - -## 0.3.3 (2013-05-23) - -- Registry: Fix push regression -- Various bugfixes - -## 0.3.2 (2013-05-09) - -#### Registry - -* Improve the checksum process -* Use the size to have a good progress bar while pushing -* Use the actual archive if it exists in order to speed up the push -- Fix error 400 on push - -#### Runtime - -* Store the actual archive on commit - -## 0.3.1 (2013-05-08) - -#### Builder - -+ Implement the autorun capability within docker builder -+ Add caching to docker builder -+ Add support for docker builder with native API as top level command -+ Implement ENV within docker builder -- Check the command existence prior to create and add unit tests for the case -* Use any whitespace instead of tabs - -#### Runtime - -+ Add go version to debug info -* Kernel version - don't show the dash if flavor is empty - -#### Registry - -+ Add docker search top level command in order to search a repository -- Fix pull for official images with specific tag -- Fix issue when logging in with a different user and trying to push -* Improve checksum - async calculation - -#### Images - -+ Output graph of images to dot (graphviz) -- Fix ByParent function - -#### Documentation - -+ New introduction and high-level overview -+ Add the documentation for docker builder -- CSS fix for docker documentation to make REST API docs look better. -- Fix CouchDB example page header mistake -- Fix README formatting -* Update www.docker.io website.
- -#### Other - -+ Website: new high-level overview -- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc -* Packaging: packaging ubuntu; issue #510: Use golang-stable PPA package to build docker - -## 0.3.0 (2013-05-06) - -#### Runtime - -- Fix the command existence check -- strings.Split may return an empty string on no match -- Fix an index out of range crash if cgroup memory is not set - -#### Documentation - -* Various improvements -+ New example: sharing data between 2 couchdb databases - -#### Other - -* Vagrant: Use only one deb line in /etc/apt -+ Registry: Implement the new registry - -## 0.2.2 (2013-05-03) - -+ Support for data volumes ('docker run -v=PATH') -+ Share data volumes between containers ('docker run -volumes-from') -+ Improve documentation -* Upgrade to Go 1.0.3 -* Various upgrades to the dev environment for contributors - -## 0.2.1 (2013-05-01) - -+ 'docker commit -run' bundles a layer with default runtime options: command, ports etc. -* Improve install process on Vagrant -+ New Dockerfile operation: "maintainer" -+ New Dockerfile operation: "expose" -+ New Dockerfile operation: "cmd" -+ Contrib script to build a Debian base layer -+ 'docker -d -r': restart crashed containers at daemon startup -* Runtime: improve test coverage - -## 0.2.0 (2013-04-23) - -- Runtime: ghost containers can be killed and waited for -* Documentation: update install instructions -- Packaging: fix Vagrantfile -- Development: automate releasing binaries and ubuntu packages -+ Add a changelog -- Various bugfixes - -## 0.1.8 (2013-04-22) - -- Dynamically detect cgroup capabilities -- Issue stability warning on kernels <3.8 -- 'docker push' buffers on disk instead of memory -- Fix 'docker diff' for removed files -- Fix 'docker stop' for ghost containers -- Fix handling of pidfile -- Various bugfixes and stability improvements - -## 0.1.7 (2013-04-18) - -- Container ports are available on localhost -- 'docker ps' shows allocated TCP ports -- Contributors can run 'make hack' to start a continuous integration VM -- Streamline ubuntu packaging & uploading -- Various bugfixes and stability improvements - -## 0.1.6 (2013-04-17) - -- Record the author of an image with 'docker commit -author' - -## 0.1.5 (2013-04-17) - -- Disable standalone mode -- Use a custom DNS resolver with 'docker -d -dns' -- Detect ghost containers -- Improve diagnosis of missing system capabilities -- Allow disabling memory limits at compile time -- Add debian packaging -- Documentation: installing on Arch Linux -- Documentation: running Redis on docker -- Fix lxc 0.9 compatibility -- Automatically load aufs module -- Various bugfixes and stability improvements - -## 0.1.4 (2013-04-09) - -- Full support for TTY emulation -- Detach from a TTY session with the escape sequence `C-p C-q` -- Various bugfixes and stability improvements -- Minor UI improvements -- Automatically create our own bridge interface 'docker0' - -## 0.1.3 (2013-04-04) - -- Choose TCP frontend port with '-p :PORT' -- Layer format is versioned -- Major reliability improvements to the process manager -- Various bugfixes and stability improvements - -## 0.1.2 (2013-04-03) - -- Set container hostname with 'docker run -h' -- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' -- Various bugfixes and stability improvements -- UI polish -- Progress bar on push/pull -- Use XZ compression by default -- Make IP allocator lazy - -## 0.1.1 (2013-03-31) - -- Display shorthand IDs for convenience -- Stabilize process management -- Layers
can include a commit message -- Simplified 'docker attach' -- Fix support for re-attaching -- Various bugfixes and stability improvements -- Auto-download at run -- Auto-login on push -- Beefed up documentation - -## 0.1.0 (2013-03-23) - -Initial public release - -- Implement registry in order to push/pull images -- TCP port allocation -- Fix termcaps on Linux -- Add documentation -- Add Vagrant support with Vagrantfile -- Add unit tests -- Add repository/tags to ease image management -- Improve the layer implementation diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md deleted file mode 100644 index 4db7e899..00000000 --- a/vendor/github.com/docker/docker/CONTRIBUTING.md +++ /dev/null @@ -1,438 +0,0 @@ -# Contributing to Docker - -Want to hack on Docker? Awesome! We have a contributor's guide that explains -[setting up a Docker development environment and the contribution -process](https://docs.docker.com/project/who-written-for/). - -![Contributors guide](docs/static_files/contributors.png) - -This page contains information about reporting issues as well as some tips and -guidelines useful to experienced open source contributors. Finally, make sure -you read our [community guidelines](#docker-community-guidelines) before you -start participating. - -## Topics - -* [Reporting Security Issues](#reporting-security-issues) -* [Design and Cleanup Proposals](#design-and-cleanup-proposals) -* [Reporting Issues](#reporting-other-issues) -* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) -* [Community Guidelines](#docker-community-guidelines) - -## Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue; instead, send your report privately to -[security@docker.com](mailto:security@docker.com). - -Security reports are greatly appreciated and we will publicly thank you for it. -We also like to send gifts—if you're into Docker schwag, make sure to let -us know. We currently do not offer a paid security bounty program, but are not -ruling it out in the future. - - -## Reporting other issues - -A great way to contribute to the project is to send a detailed report when you -encounter an issue. We always appreciate a well-written, thorough bug report, -and will thank you for it! - -Check that [our issue database](https://github.com/docker/docker/issues) -doesn't already include that problem or suggestion before submitting an issue. -If you find a match, add a quick "+1" or "I have this problem too." Doing this -helps prioritize the most common problems and requests. **DO NOT DO THAT** to -subscribe to the issue unless you have something meaningful to add to the -conversation. The best way to subscribe to the issue is by clicking the Subscribe -button in the top right of the page. - -When reporting issues, please include your host OS (Ubuntu 12.04, Fedora 19, -etc.). Please include: - -* The output of `uname -a`. -* The output of `docker version`. -* The output of `docker info`. - -Please also include the steps required to reproduce the problem if possible and -applicable. This information will help us review and fix your issue faster. - -**Issue Report Template**: - -``` -Description of problem: - - -`docker version`: - - -`docker info`: - - -`uname -a`: - - -Environment details (AWS, VirtualBox, physical, etc.): - - -How reproducible: - - -Steps to Reproduce: -1. -2. -3.
- - -Actual Results: - - -Expected Results: - - -Additional info: - - - -``` - - -## Quick contribution tips and guidelines - -This section gives the experienced contributor some tips and guidelines. - -### Pull requests are always welcome - -Not sure if that typo is worth a pull request? Found a bug and know how to fix -it? Do it! We will appreciate it. Any significant improvement should be -documented as [a GitHub issue](https://github.com/docker/docker/issues) before -anybody starts working on it. - -We are always thrilled to receive pull requests. We do our best to process them -quickly. If your pull request is not accepted on the first try, -don't get discouraged! Our contributor's guide explains [the review process we -use for simple changes](https://docs.docker.com/project/make-a-contribution/). - -### Design and cleanup proposals - -You can propose new designs for existing Docker features. You can also design -entirely new features. We really appreciate contributors who want to refactor or -otherwise clean up our project. For information on making these types of -contributions, see [the advanced contribution -section](https://docs.docker.com/project/advanced-contributing/) in the -contributors guide. - -We try hard to keep Docker lean and focused. Docker can't do everything for -everybody. This means that we might decide against incorporating a new feature. -However, there might be a way to implement that feature *on top of* Docker. - -### Talking to other Docker users and contributors - -- Internet Relay Chat (IRC): IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev group on irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started. -- Google Groups: there are two groups. Docker-user is for people using Docker containers. The docker-dev group is for contributors and other people contributing to the Docker project. -- Twitter: you can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. -- Stack Overflow: Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users.
- -### Conventions - -Fork the repository and make changes on your fork in a feature branch: - -- If it's a bug fix branch, name it XXXX-something where XXXX is the number of - the issue. -- If it's a feature branch, create an enhancement issue to announce - your intentions, and name it XXXX-something where XXXX is the number of the - issue. - -Submit unit tests for your changes. Go has a great test framework built in; use -it! Take a look at existing tests for inspiration. [Run the full test -suite](https://docs.docker.com/project/test-and-docs/) on your branch before -submitting a pull request. - -Update the documentation when creating or modifying features. Test your -documentation changes for clarity, concision, and correctness, as well as a -clean documentation build. See our contributors guide for [our style -guide](https://docs.docker.com/project/doc-style) and instructions on [building -the documentation](https://docs.docker.com/project/test-and-docs/#build-and-test-the-documentation). - -Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `gofmt -s -w file.go` on each changed file before -committing your changes. Most editors have plug-ins that do this automatically. - -Pull request descriptions should be as clear as possible and include a reference -to all the issues that they address. - -Commit messages must start with a capitalized and short summary (max. 50 chars) -written in the imperative, followed by an optional, more detailed explanatory -text which is separated from the summary by an empty line. - -Code review comments may be added to your pull request. Discuss, then make the -suggested modifications and push additional commits to your feature branch. Post -a comment after pushing. New commits show up in the pull request automatically, -but the reviewers are notified only when you comment. - -Pull requests must be cleanly rebased on top of master without multiple branches -mixed into the PR. - -**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your -feature branch to update your pull request rather than `merge master`. - -Before you make a pull request, squash your commits into logical units of work -using `git rebase -i` and `git push -f`. A logical unit of work is a consistent -set of patches that should be reviewed together: for example, upgrading the -version of a vendored dependency and taking advantage of its newly available -feature constitute two separate units of work. Implementing a new function and -calling it in another file constitute a single logical unit of work. The vast -majority of submissions should have a single commit, so if in doubt: squash -down to one. - -After every commit, [make sure the test suite -passes](https://docs.docker.com/project/test-and-docs/). Include documentation -changes in the same pull request so that a revert would remove all traces of -the feature or fix. - -Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that -close an issue. Including references automatically closes the issue on a merge. - -Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly -from the Git history. - -Please see the [Coding Style](#coding-style) for further guidelines. - -### Merge approval - -Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to -indicate acceptance. - -A change requires LGTMs from an absolute majority of the maintainers of each -component affected.
For example, if a change affects `docs/` and `registry/`, it -needs an absolute majority from the maintainers of `docs/` AND, separately, an -absolute majority of the maintainers of `registry/`. - -For more details, see the [MAINTAINERS](MAINTAINERS) page. - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith <joe.smith@email.com> - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. - -Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still -accepted, so there is no need to update outstanding pull requests to the new -format right away, but please do adjust your processes for future contributions. - -### How can I become a maintainer? - -* Step 1: Learn the component inside out -* Step 2: Make yourself useful by contributing code, bug fixes, support etc. -* Step 3: Volunteer on the IRC channel (#docker at Freenode) -* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev - -Don't forget: being a maintainer is a time investment. Make sure you -will have time to make yourself available. You don't have to be a -maintainer to make a difference on the project! - -### IRC meetings - -There are two monthly meetings taking place on #docker-dev IRC to accommodate all -timezones. Anybody can propose a topic for discussion prior to the meeting. - -If you feel the conversation is going off-topic, feel free to point it out. - -For the exact dates and times, have a look at [the irc-minutes -repo](https://github.com/docker/irc-minutes). The minutes also contain all the -notes from previous meetings.
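To make the conventions and sign-off rules above concrete, here is a minimal sketch of the workflow; the name, email, branch name, and commit message are hypothetical placeholders, not values taken from this guide:

```
# One-time setup: git builds the Signed-off-by line from these values.
git config user.name "Joe Smith"
git config user.email "joe.smith@example.com"

# Commit with -s to append "Signed-off-by: Joe Smith <joe.smith@example.com>".
git commit -s -m "Fix attach race when client disconnects"

# Before opening the PR: squash the branch down to one logical commit
# and rebase it on master, as the conventions above require.
git rebase -i master
git push -f origin 1234-fix-attach-race
```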
- -## Docker community guidelines - -We want to keep the Docker community awesome, growing and collaborative. We need -your help to keep it that way. To help with this we've come up with some general -guidelines for the community as a whole: - -* Be nice: Be courteous, respectful and polite to fellow community members: - no regional, racial, gender, or other abuse will be tolerated. We like - nice people way better than mean ones! - -* Encourage diversity and participation: Make everyone in our community feel - welcome, regardless of their background and the extent of their - contributions, and do everything possible to encourage participation in - our community. - -* Keep it legal: Basically, don't get us in trouble. Share only content that - you own, do not share private or sensitive information, and don't break - the law. - -* Stay on topic: Make sure that you are posting to the correct channel and - avoid off-topic discussions. Remember that when you update an issue or respond - to an email, you are potentially sending to a large number of people. Please - consider this before you update. Also remember that nobody likes spam. - -### Guideline violations — 3 strikes method - -The point of this section is not to find opportunities to punish people, but we -do need a fair way to deal with people who are making our community suck. - -1. First occurrence: We'll give you a friendly, but public reminder that the - behavior is inappropriate according to our guidelines. - -2. Second occurrence: We will send you a private message with a warning that - any additional violations will result in removal from the community. - -3. Third occurrence: Depending on the violation, we may need to delete or ban - your account. - -**Notes:** - -* Obvious spammers are banned on first occurrence. If we don't do this, we'll - have spam all over the place. - -* Violations are forgiven after 6 months of good behavior, and we won't hold a - grudge. - -* People who commit minor infractions will get some education, rather than - being hammered by the 3 strikes process. - -* The rules apply equally to everyone in the community, no matter how much - you've contributed. - -* Extreme violations of a threatening, abusive, destructive or illegal nature - will be addressed immediately and are not subject to 3 strikes or forgiveness. - -* Contact abuse@docker.com to report abuse or appeal violations. In the case of - appeals, we know that mistakes happen, and we'll work with you to come up with a - fair solution if there has been a misunderstanding. - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they found it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3.
All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile deleted file mode 100644 index 43f8f64c..00000000 --- a/vendor/github.com/docker/docker/Dockerfile +++ /dev/null @@ -1,201 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# # Publish a release: -# docker run --privileged \ -# -e AWS_S3_BUCKET=baz \ -# -e AWS_ACCESS_KEY=foo \ -# -e AWS_SECRET_KEY=bar \ -# -e GPG_PASSPHRASE=gloubiboulga \ -# docker hack/release.sh -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore.
-# - -FROM ubuntu:14.04 -MAINTAINER Tianon Gravi (@tianon) - -RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 -RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - gcc-mingw-w64 \ - git \ - iptables \ - libapparmor-dev \ - libcap-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - mercurial \ - parallel \ - pkg-config \ - python-mock \ - python-pip \ - python-websocket \ - reprepro \ - ruby1.9.1 \ - ruby1.9.1-dev \ - s3cmd=1.1.0* \ - ubuntu-zfs \ - libzfs-dev \ - --no-install-recommends - -# Get lvm2 source for compiling statically -RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Install Go -ENV GO_VERSION 1.5.1 -RUN curl -sSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar -v -C /usr/local -xz -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 - -# (set an explicit GOARM of 5 for maximum compatibility) -ENV GOARM 5 - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
-# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - -# TODO replace FPM with some very minimal debhelper stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 - -# Install registry -ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary server -ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 47ab89ec2bd3bddf1221b856ffbaff333edeabb4 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Setup s3cmd config -RUN { \ - echo '[default]'; \ - echo 'access_key=$AWS_ACCESS_KEY'; \ - echo 'secret_key=$AWS_SECRET_KEY'; \ - } > ~/.s3cfg - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. 
-RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image.sh /docker-frozen-images \ - busybox:latest@d7057cb020844f245031d27b76cb18af05db1cc3a96a29fa7777af75f5ac91a3 \ - hello-world:frozen@91c95931e552b11604fea91c2f537284149ec32fff0f700a4769cfd31d7696ae \ - jess/unshare@5c9f6ea50341a2a8eb6677527f2bdedbf331ae894a41714fda770fb130f3314d -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone -b v1.0.3 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone -b v1.2 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Build/install the tool for embedding resources in Windows binaries -ENV RSRC_COMMIT e48dbf1b7fc464a9e85fcec450dddf80816b76e0 -RUN set -x \ - && git clone https://github.com/akavel/rsrc.git /go/src/github.com/akavel/rsrc \ - && cd /go/src/github.com/akavel/rsrc \ - && git checkout -q $RSRC_COMMIT \ - && go install -v - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.dapper b/vendor/github.com/docker/docker/Dockerfile.dapper new file mode 100644 index 00000000..6a6a18c6 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.dapper @@ -0,0 +1,49 @@ +FROM ubuntu:16.04 + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + curl \ + ca-certificates \ + libc-dev \ + gcc \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libseccomp-dev \ + net-tools \ + iproute2 \ + pkg-config \ + tar \ + vim \ + --no-install-recommends + +# Install go +RUN curl -sLf https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz | tar xzf - -C /usr/local +ENV GOPATH=/go +ENV PATH=/go/bin:/usr/local/go/bin:$PATH + +# Setup runc +RUN ln -s /go/src/github.com/docker/docker/bin/docker /usr/local/bin/docker-runc + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +# Trash +RUN go get github.com/rancher/trash + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux cgo daemon netgo +ENV DAPPER_SOURCE /go/src/github.com/docker/docker +ENV DAPPER_RUN_ARGS --privileged +ENV DAPPER_OUTPUT bin +ENV DAPPER_ENV TAG REPO +ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache +ENV PATH=${DAPPER_SOURCE}/bin:$PATH + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENTRYPOINT ["./scripts/entry"] diff --git a/vendor/github.com/docker/docker/Dockerfile.gccgo b/vendor/github.com/docker/docker/Dockerfile.gccgo deleted file mode 100644 index 1392e723..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.gccgo +++ /dev/null @@ -1,62 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.gccgo . -# - -FROM gcc:5.2 - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - btrfs-tools \ - build-essential \ - curl \ - git \ - iptables \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libsqlite3-dev \ - mercurial \ - parallel \ - python-mock \ - python-pip \ - python-websocket \ - --no-install-recommends - -# Get lvm2 source for compiling statically -RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 139850f3f3b17357bab5ba3edfb745fb14043764 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple deleted file mode 100644 index 507018b0..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.simple +++ /dev/null @@ -1,33 +0,0 @@ -# docker build -t docker:simple -f Dockerfile.simple . -# docker run --rm docker:simple hack/make.sh dynbinary -# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit -# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli - -# This represents the bare minimum required to build and test Docker. - -FROM debian:jessie - -# compile and runtime deps -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - btrfs-tools \ - curl \ - gcc \ - git \ - golang \ - libdevmapper-dev \ - libsqlite3-dev \ - \ - ca-certificates \ - e2fsprogs \ - iptables \ - procps \ - xz-utils \ - \ - aufs-tools \ - && rm -rf /var/lib/apt/lists/* - -ENV AUTO_GOPATH 1 -WORKDIR /usr/src/docker -COPY . /usr/src/docker diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE index c7a3f0cf..8f3fee62 100644 --- a/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2015 Docker, Inc. + Copyright 2013-2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS deleted file mode 100644 index 502cad3a..00000000 --- a/vendor/github.com/docker/docker/MAINTAINERS +++ /dev/null @@ -1,599 +0,0 @@ -# Docker maintainers file -# -# This file describes who runs the Docker project and how. -# This is a living document - if you see something out of date or missing, -# speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant -# parser. - -[Rules] - - [Rules.maintainers] - - title = "What is a maintainer?" - - text = """ -There are different types of maintainers, with different responsibilities, but -all maintainers have 3 things in common: - -1) They share responsibility in the project's success. -2) They have made a long-term, recurring time investment to improve the project. -3) They spend that time doing whatever needs to be done, not necessarily what -is the most interesting or fun. - -Maintainers are often under-appreciated, because their work is harder to appreciate. -It's easy to appreciate a really cool and technically advanced feature. It's harder -to appreciate the absence of bugs, the slow but steady improvement in stability, -or the reliability of a release process. But those things distinguish a good -project from a great one. -""" - - [Rules.bdfl] - - title = "The Benevolent dictator for life (BDFL)" - - text = """ -Docker follows the timeless, highly efficient and totally unfair system -known as [Benevolent dictator for -life](https://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with -yours truly, Solomon Hykes, in the role of BDFL. This means that all -decisions are made, by default, by Solomon. 
Since making every decision -myself would be highly un-scalable, in practice decisions are spread -across multiple maintainers. - -Ideally, the BDFL role is like the Queen of England: awesome crown, but not -an actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY. -Every other rule can change, perhaps drastically so, but the BDFL will always -be there, preserving the philosophy and principles of the project, and keeping -ultimate authority over its fate. This gives us great flexibility in experimenting -with various governance models, knowing that we can always press the "reset" button -without fear of fragmentation or deadlock. See the US congress for a counter-example. - -BDFL daily routine: - -* Is the project governance stuck in a deadlock or irreversibly fragmented? - * If yes: refactor the project governance -* Are there issues or conflicts escalated by core? - * If yes: resolve them -* Go back to polishing that crown. -""" - - [Rules.decisions] - - title = "How are decisions made?" - - text = """ -Short answer: EVERYTHING IS A PULL REQUEST. - -Docker is an open-source project with an open design philosophy. This -means that the repository is the source of truth for EVERY aspect of the -project, including its philosophy, design, road map, and APIs. *If it's -part of the project, it's in the repo. If it's in the repo, it's part of -the project.* - -As a result, all decisions can be expressed as changes to the -repository. An implementation change is a change to the source code. An -API change is a change to the API specification. A philosophy change is -a change to the philosophy manifesto, and so on. - -All decisions affecting Docker, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Merge or refuse the pull request. Who does this depends on the nature -of the pull request and which areas of the project it affects. See *review flow* -for details. - -Because Docker is such a large and active project, it's important for everyone to know -who is responsible for deciding what. That is determined by a precise set of rules. - -* For every *decision* in the project, the rules should designate, in a deterministic way, -who should *decide*. - -* For every *problem* in the project, the rules should designate, in a deterministic way, -who should be responsible for *fixing* it. - -* For every *question* in the project, the rules should designate, in a deterministic way, -who should be expected to have the *answer*. -""" - - [Rules.review] - - title = "Review flow" - - text = """ -Pull requests should be processed according to the following flow: - -* For each subsystem affected by the change, the maintainers of the subsystem must approve or refuse it. -It is the responsibility of the subsystem maintainers to process patches affecting them in a timely -manner. - -* If the change affects areas of the code which are not part of a subsystem, -or if subsystem maintainers are unable to reach a timely decision, it must be approved by -the core maintainers. - -* If the change affects the UI or public APIs, or if it represents a major change in architecture, -the architects must approve or refuse it. - -* If the change affects the operations of the project, it must be approved or rejected by -the relevant operators. - -* If the change affects the governance, philosophy, goals or principles of the project, -it must be approved by BDFL. 
-""" - - [Rules.DCO] - - title = "Helping contributors with the DCO" - - text = """ -The [DCO or `Sign your work`]( -https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some Docker contributors are not as familiar with `git`, or have used a web based -editor, and thus asking them to `git commit --amend -s` is not the best way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. The -most trivial way for a contributor to allow the maintainer to do this, is to add -a DCO signature in a Pull Requests's comment, or a maintainer can simply note that -the change is sufficiently trivial that it does not substantially change the existing -contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. -""" - - [Rules.holiday] - - title = "I'm a maintainer, and I'm going on holiday" - - text = """ -Please let your co-maintainers and other contributors know by raising a pull -request that comments out your `MAINTAINERS` file entry using a `#`. -""" - - [Rules."no direct push"] - - title = "I'm a maintainer. Should I make pull requests too?" - - text = """ -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. -""" - - [Rules.meta] - - title = "How is this process changed?" - - text = "Just like everything else: by making a pull request :)" - -# Current project organization -[Org] - - bdfl = "shykes" - - # The chief architect is responsible for the overall integrity of the technical architecture - # across all subsystems, and the consistency of APIs and UI. - # - # Changes to UI, public APIs and overall architecture (for example a plugin system) must - # be approved by the chief architect. - "Chief Architect" = "shykes" - - [Org.Operators] - - # The operators make sure the trains run on time. They are responsible for overall operations - # of the project. This includes facilitating communication between all the participants; helping - # newcomers get involved and become successful contributors and maintainers; tracking the schedule - # of releases; managing the relationship with downstream distributions and upstream dependencies; - # define measures of success for the project and measure progress; Devise and implement tools and - # processes which make contributors and maintainers happier and more efficient. - - - [Org.Operators.security] - - people = [ - "erw", - "diogomonica", - "nathanmccauley" - ] - - [Org.Operators."monthly meetings"] - - people = [ - "sven", - "tianon" - ] - - [Org.Operators.infrastructure] - - people = [ - "jfrazelle", - "crosbymichael" - ] - - [Org.Operators.community] - people = [ - "theadactyl" - ] - - # The chief maintainer is responsible for all aspects of quality for the project including - # code reviews, usability, stability, security, performance, etc. - # The most important function of the chief maintainer is to lead by example. On the first - # day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll - # be fine". - "Chief Maintainer" = "crosbymichael" - - # The community manager is responsible for serving the project community, including users, - # contributors and partners. 
This involves: - # - facilitating communication between maintainers, contributors and users - # - organizing contributor and maintainer events - # - helping new contributors get involved - # - anything the project community needs to be successful - # - # The community manager is a point of contact for any contributor who has questions, concerns - # or feedback about project operations. - "Community Manager" = "theadactyl" - - [Org."Core maintainers"] - - # The Core maintainers are the ghostbusters of the project: when there's a problem others - # can't solve, they show up and fix it with bizarre devices and weaponry. - # They have final say on technical implementation and coding style. - # They are ultimately responsible for quality in all its forms: usability polish, - # bugfixes, performance, stability, etc. When ownership can cleanly be passed to - # a subsystem, they are responsible for doing so and holding the - # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. - - # For each release (including minor releases), a "release captain" is assigned from the - # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure - # the release process is clear and up-to-date. - # - # It is common for core maintainers to "branch out" to join or start a subsystem. - - people = [ - "calavera", - "crosbymichael", - "erikh", - "estesp", - "icecrime", - "jfrazelle", - "lk4d4", - "runcom", - "tibor", - "unclejack", - "vbatts", - "vdemeester", - "vieux", - "vishh" - ] - - [Org.Subsystems] - - # As the project grows, it gets separated into well-defined subsystems. Each subsystem - # has a dedicated group of maintainers, which are dedicated to that subsystem and responsible - # for its quality. - # This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows. - # - # The maintainers of each subsystem are responsible for: - # - # 1. Exposing a clear road map for improving their subsystem. - # 2. Delivering prompt feedback and decisions on pull requests affecting their subsystem. - # 3. Being available to anyone with questions, bug reports, criticism etc. - # on their component. This includes IRC, GitHub requests and the mailing - # list. - # 4. Making sure their subsystem respects the philosophy, design and - # road map of the project. - # - # #### How to review patches to your subsystem - # - # Accepting pull requests: - # - # - If the pull request appears to be ready to merge, give it a `LGTM`, which - # stands for "Looks Good To Me". - # - If the pull request has some small problems that need to be changed, make - # a comment addressing the issues. - # - If the changes needed to a PR are small, you can add a "LGTM once the - # following comments are addressed..."; this will reduce needless back and - # forth. - # - If the PR only needs a few changes before being merged, any MAINTAINER can - # make a replacement PR that incorporates the existing commits and fixes the - # problems before a fast-track merge. - # - # Closing pull requests: - # - # - If a PR appears to be abandoned, after having attempted to contact the - # original contributor, then a replacement PR may be made. Once the - # replacement PR is made, any contributor may close the original one. - # - If you are not sure if the pull request implements a good feature or you - # do not understand the purpose of the PR, ask the contributor to provide - # more documentation. 
If the contributor is not able to adequately explain - # the purpose of the PR, the PR may be closed by any MAINTAINER. - # - If a MAINTAINER feels that the pull request is sufficiently architecturally - # flawed, or if the pull request needs significantly more design discussion - # before being considered, the MAINTAINER should close the pull request with - # a short explanation of what discussion still needs to be had. It is - # important not to leave such pull requests open, as this will waste both the - # MAINTAINER's time and the contributor's time. It is not good to string a - # contributor along for weeks or months, having them make many changes to a PR - # that will eventually be rejected. - - [Org.Subsystems.Documentation] - - people = [ - "james", - "moxiegirl", - "thaJeztah", - "jamtur01", - "sven" - ] - - [Org.Subsystems.libcontainer] - - people = [ - "crosbymichael", - "jnagal", - "lk4d4", - "mpatel", - "vmarmol" - ] - - [Org.Subsystems.registry] - - people = [ - "dmcg", - "dmp42", - "jlhawn", - "samalba", - "sday", - "vbatts" - ] - - [Org.Subsystems."build tools"] - - people = [ - "shykes", - "tianon" - ] - - [Org.Subsystem."remote api"] - - people = [ - "vieux" - ] - - [Org.Subsystem.swarm] - - people = [ - "aluzzardi", - "vieux" - ] - - [Org.Subsystem.machine] - - people = [ - "bfirsh", - "ehazlett" - ] - - [Org.Subsystem.compose] - - people = [ - "aanand" - ] - - [Org.Subsystem.builder] - - people = [ - "duglin", - "erikh", - "tibor" - ] - - [Org.Curators] - - # The curators help ensure that incoming issues and pull requests are properly triaged and - # that our various contribution and reviewing processes are respected. With their knowledge of - # the repository activity, they can also guide contributors to relevant material or - # discussions. - # - # They are neither code nor docs reviewers, so they are never expected to merge. They can - # however: - # - close an issue or pull request when it's an exact duplicate - # - close an issue or pull request when it's inappropriate or off-topic - - people = [ - "thajeztah" - ] - - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
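The manifest header earlier in this file notes that it is meant to be consumable by any TOML-compliant parser. A minimal Go sketch of that claim, assuming the github.com/BurntSushi/toml package (the same library whose `tomlv` validator appears in the deleted Dockerfile above); the struct names here are illustrative, not part of this patch:

```go
// Hypothetical reader for the [people] section of MAINTAINERS.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Person mirrors one [people.<key>] entry. BurntSushi/toml falls back to
// case-insensitive key matching, so both "GitHub" and "Github" spellings
// in the file decode into the same field.
type Person struct {
	Name   string
	Email  string
	GitHub string
}

// Manifest decodes only what we need; the [Rules] and [Org] tables are
// simply left undecoded.
type Manifest struct {
	People map[string]Person `toml:"people"`
}

func main() {
	var m Manifest
	if _, err := toml.DecodeFile("MAINTAINERS", &m); err != nil {
		panic(err)
	}
	for key, p := range m.People {
		fmt.Printf("%-16s %s <%s>\n", key, p.Name, p.Email)
	}
}
```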
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aanand] - Name = "Aanand Prasad" - Email = "aanand@docker.com" - GitHub = "aanand" - - [people.aluzzardi] - Name = "Andrea Luzzardi" - Email = "aluzzardi@docker.com" - GitHub = "aluzzardi" - - [people.bfirsh] - Name = "Ben Firshman" - Email = "ben@firshman.co.uk" - GitHub = "bfirsh" - - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" - - [people.cpuguy83] - Name = "Brian Goff" - Email = "cpuguy83@gmail.com" - Github = "cpuguy83" - - [people.crosbymichael] - Name = "Michael Crosby" - Email = "crosbymichael@gmail.com" - GitHub = "crosbymichael" - - [people.diogomonica] - Name = "Diogo Monica" - Email = "diogo@docker.com" - GitHub = "diogomonica" - - [people.duglin] - Name = "Doug Davis" - Email = "dug@us.ibm.com" - GitHub = "duglin" - - [people.dmcg] - Name = "Derek McGowan" - Email = "derek@docker.com" - Github = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - Github = "dmp42" - - [people.ehazlett] - Name = "Evan Hazlett" - Email = "ejhazlett@gmail.com" - GitHub = "ehazlett" - - [people.erikh] - Name = "Erik Hollensbe" - Email = "erik@docker.com" - GitHub = "erikh" - - [people.erw] - Name = "Eric Windisch" - Email = "eric@windisch.us" - GitHub = "ewindisch" - - [people.estesp] - Name = "Phil Estes" - Email = "estesp@linux.vnet.ibm.com" - GitHub = "estesp" - - [people.icecrime] - Name = "Arnaud Porterie" - Email = "arnaud@docker.com" - GitHub = "icecrime" - - [people.jfrazelle] - Name = "Jessie Frazelle" - Email = "j@docker.com" - GitHub = "jfrazelle" - - [people.jlhawn] - Name = "Josh Hawn" - Email = "josh.hawn@docker.com" - Github = "jlhawn" - - [people.jnagal] - Name = "Rohit Jnagal" - Email = "jnagal@google.com" - GitHub = "rjnagal" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.moxiegirl] - Name = "Mary Anthony" - Email = "mary.anthony@docker.com" - GitHub = "moxiegirl" - - [people.mpatel] - Name = "Mrunal Patel" - Email = "mpatel@redhat.com" - GitHub = "mrunalp" - - [people.nathanmccauley] - Name = "Nathan McCauley" - Email = "nathan.mccauley@docker.com" - GitHub = "nathanmccauley" - - [people.runcom] - Name = "Antonio Murdaca" - Email = "me@runcom.ninja" - GitHub = "runcom" - - [people.sday] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - Github = "stevvooe" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.sven] - Name = "Sven Dowideit" - Email = "SvenDowideit@home.org.au" - GitHub = "SvenDowideit" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.theadactyl] - Name = "Thea Lamkin" - Email = "thea@docker.com" - GitHub = "theadactyl" - - [people.tianon] - Name = "Tianon Gravi" - Email = "admwiggin@gmail.com" - GitHub = "tianon" - - [people.tibor] - Name = "Tibor Vass" - Email = "tibor@docker.com" - GitHub = "tiborvass" - - [people.unclejack] - Name = "Cristian Staretu" - Email = "cristian.staretu@gmail.com" - GitHub = "unclejack" - - [people.vbatts] - Name = "Vincent Batts" - Email = "vbatts@redhat.com" - GitHub = "vbatts" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" - - [people.vieux] - Name = "Victor Vieux" - Email = "vieux@docker.com" - GitHub = "vieux" - - [people.vishh] - Name = "Vishnu Kannan" - Email = "vishnuk@google.com" - GitHub = "vishh" - - [people.vmarmol] - Name = 
"Victor Marmol" - Email = "vmarmol@google.com" - GitHub = "vmarmol" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile index c1f58806..d7d72a16 100644 --- a/vendor/github.com/docker/docker/Makefile +++ b/vendor/github.com/docker/docker/Makefile @@ -1,85 +1,23 @@ -.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration-cli test-docker-py validate +TARGETS := $(shell ls scripts) -# env vars passed through directly to Docker's build scripts -# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily -# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these -DOCKER_ENVS := \ - -e BUILDFLAGS \ - -e DOCKER_CLIENTONLY \ - -e DOCKER_DEBUG \ - -e DOCKER_EXPERIMENTAL \ - -e DOCKER_REMAP_ROOT \ - -e DOCKER_GRAPHDRIVER \ - -e DOCKER_STORAGE_OPTS \ - -e DOCKER_USERLANDPROXY \ - -e TESTDIRS \ - -e TESTFLAGS \ - -e TIMEOUT -# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds +.dapper: + @echo Downloading dapper + @curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp + @@chmod +x .dapper.tmp + @./.dapper.tmp -v + @mv .dapper.tmp .dapper -# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` -# (default to no bind mount if DOCKER_HOST is set) -# note: BINDDIR is supported for backwards-compatibility here -BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) -DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") +$(TARGETS): .dapper + ./.dapper $@ +trash: .dapper + ./.dapper -m bind trash -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +trash-keep: .dapper + ./.dapper -m bind trash -k -DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) +deps: trash -# if this session isn't interactive, then we don't want to allocate a -# TTY, which would fail, but if it is interactive, we do want to attach -# so that the user can send e.g. ^C through. -INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) -ifeq ($(INTERACTIVE), 1) - DOCKER_FLAGS += -t -endif +.DEFAULT_GOAL := ci -DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" - -default: binary - -all: build - $(DOCKER_RUN_DOCKER) hack/make.sh - -binary: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary - -cross: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary cross - -deb: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary build-deb - -rpm: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary build-rpm - -test: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration-cli test-docker-py - -test-unit: build - $(DOCKER_RUN_DOCKER) hack/make.sh test-unit - -test-integration-cli: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli - -test-docker-py: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py - -validate: build - $(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet - -shell: build - $(DOCKER_RUN_DOCKER) bash - -build: bundles - docker build -t "$(DOCKER_IMAGE)" . 
- -bundles: - mkdir bundles - -docs: - $(MAKE) -C docs docs +.PHONY: $(TARGETS) diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE index 6e6f469a..8a37c1c7 100644 --- a/vendor/github.com/docker/docker/NOTICE +++ b/vendor/github.com/docker/docker/NOTICE @@ -1,5 +1,5 @@ Docker -Copyright 2012-2015 Docker, Inc. +Copyright 2012-2016 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md deleted file mode 100644 index a4d65c32..00000000 --- a/vendor/github.com/docker/docker/README.md +++ /dev/null @@ -1,300 +0,0 @@ -Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) -============================ - -Docker is an open source project to pack, ship and run any application -as a lightweight container. - -Docker containers are both *hardware-agnostic* and *platform-agnostic*. -This means they can run anywhere, from your laptop to the largest -cloud compute instance and everything in between - and they don't require -you to use a particular language, framework or packaging system. That -makes them great building blocks for deploying and scaling web apps, -databases, and backend services without depending on a particular stack -or provider. - -Docker began as an open-source implementation of the deployment engine which -powers [dotCloud](https://www.dotcloud.com), a popular Platform-as-a-Service. -It benefits directly from the experience accumulated over several years -of large-scale operation and support of hundreds of thousands of -applications and databases. - -![Docker L](docs/static_files/docker-logo-compressed.png "Docker") - -## Security Disclosure - -Security is very important to us. If you have any issue regarding security, -please disclose the information responsibly by sending an email to -security@docker.com and not by creating a github issue. - -## Better than VMs - -A common method for distributing applications and sandboxing their -execution is to use virtual machines, or VMs. Typical VM formats are -VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory -these formats should allow every developer to automatically package -their application into a "machine" for easy distribution and deployment. -In practice, that almost never happens, for a few reasons: - - * *Size*: VMs are very large which makes them impractical to store - and transfer. - * *Performance*: running VMs consumes significant CPU and memory, - which makes them impractical in many scenarios, for example local - development of multi-tier applications, and large-scale deployment - of cpu and memory-intensive applications on large numbers of - machines. - * *Portability*: competing VM environments don't play well with each - other. Although conversion tools do exist, they are limited and - add even more overhead. - * *Hardware-centric*: VMs were designed with machine operators in - mind, not software developers. As a result, they offer very - limited tooling for what developers need most: building, testing - and running their software. For example, VMs offer no facilities - for application versioning, monitoring, configuration, logging or - service discovery. - -By contrast, Docker relies on a different sandboxing method known as -*containerization*. Unlike traditional virtualization, containerization -takes place at the kernel level. 
Most modern operating system kernels -now support the primitives necessary for containerization, including -Linux with [openvz](https://openvz.org), -[vserver](http://linux-vserver.org) and more recently -[lxc](https://linuxcontainers.org/), Solaris with -[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), -and FreeBSD with -[Jails](https://www.freebsd.org/doc/handbook/jails.html). - -Docker builds on top of these low-level primitives to offer developers a -portable format and runtime environment that solves all four problems. -Docker containers are small (and their transfer can be optimized with -layers), they have basically zero memory and cpu overhead, they are -completely portable, and are designed from the ground up with an -application-centric design. - -Perhaps best of all, because Docker operates at the OS level, it can still be -run inside a VM! - -## Plays well with others - -Docker does not require you to buy into a particular programming -language, framework, packaging system, or configuration language. - -Is your application a Unix process? Does it use files, tcp connections, -environment variables, standard Unix streams and command-line arguments -as inputs and outputs? Then Docker can run it. - -Can your application's build be expressed as a sequence of such -commands? Then Docker can build it. - -## Escape dependency hell - -A common problem for developers is the difficulty of managing all -their application's dependencies in a simple and automated way. - -This is usually difficult for several reasons: - - * *Cross-platform dependencies*. Modern applications often depend on - a combination of system libraries and binaries, language-specific - packages, framework-specific modules, internal components - developed for another project, etc. These dependencies live in - different "worlds" and require different tools - these tools - typically don't work well with each other, requiring awkward - custom integrations. - - * *Conflicting dependencies*. Different applications may depend on - different versions of the same dependency. Packaging tools handle - these situations with various degrees of ease - but they all - handle them in different and incompatible ways, which again forces - the developer to do extra work. - - * *Custom dependencies*. A developer may need to prepare a custom - version of their application's dependency. Some packaging systems - can handle custom versions of a dependency, others can't - and all - of them handle it differently. - - -Docker solves the problem of dependency hell by giving the developer a simple -way to express *all* their application's dependencies in one place, while -streamlining the process of assembling them. If this makes you think of -[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't -*replace* your favorite packaging systems. It simply orchestrates -their use in a simple and repeatable way. How does it do that? With -layers. - -Docker defines a build as running a sequence of Unix commands, one -after the other, in the same container. Build commands modify the -contents of the container (usually by installing new files on the -filesystem), the next command modifies it some more, etc. Since each -build command inherits the result of the previous commands, the -*order* in which the commands are executed expresses *dependencies*. 
- -Here's a typical Docker build process: - -```bash -FROM ubuntu:12.04 -RUN apt-get update && apt-get install -y python python-pip curl -RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv -RUN cd helloflask-master && pip install -r requirements.txt -``` - -Note that Docker doesn't care *how* dependencies are built - as long -as they can be built by running a Unix command in a container. - - -Getting started -=============== - -Docker can be installed on your local machine as well as servers - both -bare metal and virtualized. It is available as a binary on most modern -Linux systems, or as a VM on Windows, Mac and other systems. - -We also offer an [interactive tutorial](https://www.docker.com/tryit/) -for quickly learning the basics of using Docker. - -For up-to-date install instructions, see the [Docs](https://docs.docker.com). - -Usage examples -============== - -Docker can be used to run short-lived commands, long-running daemons -(app servers, databases, etc.), interactive shell sessions, etc. - -You can find a [list of real-world -examples](https://docs.docker.com/examples/) in the -documentation. - -Under the hood --------------- - -Under the hood, Docker is built on the following components: - -* The - [cgroups](https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt) - and - [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) - capabilities of the Linux kernel -* The [Go](https://golang.org) programming language -* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) -* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) - -Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) -====================== - -| **Master** (Linux) | **Experimental** (linux) | **Windows** | **FreeBSD** | -|------------------|----------------------|---------|---------| -| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | - -Want to hack on Docker? Awesome! We have [instructions to help you get -started contributing code or documentation](https://docs.docker.com/project/who-written-for/). - -These instructions are probably not perfect, please let us know if anything -feels wrong or incomplete. Better yet, submit a PR and improve them yourself. - -Getting the development builds -============================== - -Want to run Docker from a master build? You can download -master builds at [master.dockerproject.org](https://master.dockerproject.org). -They are updated with each commit merged into the master branch. - -Don't know how to use that super cool new feature in the master build? Check -out the master docs at -[docs.master.dockerproject.org](http://docs.master.dockerproject.org). 
- -How the project is run -====================== - -Docker is a very, very active project. If you want to learn more about how it is run, -or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). - -We are always open to suggestions on process improvements, and are always looking for more maintainers. - -### Talking to other Docker users and contributors -
-* **Internet Relay Chat (IRC):** a direct line to our most knowledgeable Docker users; we have
-both the #docker and #docker-dev groups on irc.freenode.net. IRC is a rich chat protocol but it
-can overwhelm new users. You can search our chat archives, and read our IRC quickstart guide for
-an easy way to get started.
-* **Google Groups:** there are two groups. Docker-user is for people using Docker containers.
-The docker-dev group is for contributors and other people contributing to the Docker project.
-* **Twitter:** you can follow Docker's Twitter feed to get updates on our products. You can also
-tweet us questions or just share blogs or stories.
-* **Stack Overflow:** Stack Overflow has over 7000 Docker questions listed. We regularly monitor
-Docker questions, and so do many other knowledgeable Docker users.
- -### Legal - -*Brought to you courtesy of our legal counsel. For more context, -please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. - -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - - -Licensing -========= -Docker is licensed under the Apache License, Version 2.0. See -[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full -license text. - -Other Docker Related Projects -============================= -There are a number of projects under development that are based on Docker's -core technology. These projects expand the tooling built around the -Docker platform to broaden its application and utility. - -* [Docker Registry](https://github.com/docker/distribution): Registry -server for Docker (hosting/delivery of repositories and images) -* [Docker Machine](https://github.com/docker/machine): Machine management -for a container-centric world -* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering -system -* [Docker Compose](https://github.com/docker/compose) (formerly Fig): -Define and run multi-container apps -* [Kitematic](https://github.com/kitematic/kitematic): The easiest way to use -Docker on Mac and Windows - -If you know of another project underway that should be listed here, please help -us keep this list up-to-date by submitting a PR. - -Awesome-Docker -============== -You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. diff --git a/vendor/github.com/docker/docker/ROADMAP.md b/vendor/github.com/docker/docker/ROADMAP.md deleted file mode 100644 index 4ec0bf0a..00000000 --- a/vendor/github.com/docker/docker/ROADMAP.md +++ /dev/null @@ -1,183 +0,0 @@ -Docker Engine Roadmap -===================== - -### How should I use this document? - -This document provides a description of the items that the project decided to prioritize. This should -serve as a reference point for Docker contributors to understand where the project is going, and -help determine if a contribution could be conflicting with some longer-term plans. - -The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be -refused (except for those mentioned as "frozen features" below)! We are always happy to receive -patches for new cool features we haven't thought about, or didn't judge a priority. Please however -understand that such patches might take longer for us to review. - -### How can I help? - -Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described -in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our -goal is to split the workload in such a way that anybody can jump in and help. Please comment on -issues if you want to take one on, to avoid duplicating effort! Similarly, if a maintainer is already -assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is -the best way to go. - -### How can I add something to the roadmap? - -The roadmap process is new to the Docker Engine: we are only beginning to structure and document the -project objectives. 
Our immediate goal is to be more transparent, and work with our community to -focus our efforts on fewer prioritized topics. - -We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but -we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we -won't be accepting pull requests adding or removing items from this file. - -# 1. Features and refactoring - -## 1.1 Security - -Security is a top objective for the Docker Engine. The most notable items we intend to provide in -the near future are: - -- Trusted distribution of images: the effort is driven by the [distribution](https://github.com/docker/distribution) -group but will have significant impact on the Engine -- [User namespaces](https://github.com/docker/docker/pull/12648) -- [Seccomp support](https://github.com/docker/libcontainer/pull/613) - -## 1.2 Plumbing project - -We define a plumbing tool as a standalone piece of software usable and meaningful on its own. In -the current state of the Docker Engine, most subsystems provide independent functionalities (such -as the builder, pushing and pulling images, running applications in a containerized environment, etc) -but all are coupled in a single binary. We want to offer users the flexibility to use only the -pieces they need, and we will also gain in maintainability by splitting the project among multiple -repositories. - -As it currently stands, the rough design outline is to have: -- Low level plumbing tools, each dealing with one responsibility (e.g., [runC](https://runc.io)) -- Docker subsystem services, each exposing an elementary concept over an API, and relying on one or -multiple lower level plumbing tools for their implementation (e.g., network management) -- Docker Engine to expose higher level actions (e.g., create a container with volume `V` and network -`N`), while still providing pass-through access to the individual subsystems. - -The architectural details are still being worked on, but one thing we know for sure is that we need -to technically decouple the pieces. - -### 1.2.1 Runtime - -A Runtime tool already exists today in the form of [runC](https://github.com/opencontainers/runc). -We intend to modify the Engine to directly call out to a binary implementing the Open Containers -Specification such as runC rather than relying on libcontainer to set the container runtime up. - -This plan will deprecate the existing [`execdriver`](https://github.com/docker/docker/tree/master/daemon/execdriver) -as different runtime backends will be implemented as separate binaries instead of being compiled -into the Engine. - -### 1.2.2 Builder - -The Builder (i.e., the ability to build an image from a Dockerfile) is already nicely decoupled, -but would benefit from being entirely separated from the Engine, and relying on the standard Engine -API for its operations (a sketch of this idea follows section 1.2.3 below). - -### 1.2.3 Distribution - -Distribution already has a [dedicated repository](https://github.com/docker/distribution) which -holds the implementation for Registry v2 and client libraries. We could imagine going further by -having the Engine call out to a binary providing image distribution related functionalities. - -There are two short term goals related to image distribution. The first is to stabilize and simplify -the push/pull code. Following that is the conversion to the more secure Registry V2 protocol. 
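To make section 1.2.2 concrete: a standalone builder that relies only on the public Engine API is essentially what the engine-api client vendored later in this diff enables. A hypothetical sketch, mirroring the `ImageBuild` call in api/client/build.go below (the tag name and the stdin-as-context choice are illustrative assumptions, not part of this patch):

```go
// Hypothetical standalone "builder" that drives the Engine purely
// through its public API, using the vendored engine-api client.
package main

import (
	"io"
	"os"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

func main() {
	// NewEnvClient honours DOCKER_HOST and friends, like the docker CLI.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Assume the build context arrives as a tar stream on stdin, e.g.
	// produced by `tar -c .` in a directory containing a Dockerfile.
	resp, err := cli.ImageBuild(context.Background(), types.ImageBuildOptions{
		Context:    os.Stdin,
		Dockerfile: "Dockerfile",
		Tags:       []string{"example/standalone-build"},
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The daemon streams JSON progress messages back; just echo them.
	io.Copy(os.Stdout, resp.Body)
}
```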
- -### 1.2.4 Networking - -Most of the networking-related code was already decoupled today in [libnetwork](https://github.com/docker/libnetwork). -As with other ingredients, we might want to take it a step further and make it a meaningful utility -that the Engine would call out to instead of a library. - -## 1.3 Plugins - -An initiative around plugins started with Docker 1.7.0, with the goal of allowing for out-of-process -extensibility of some Docker functionalities, starting with volumes and networking. The -approach is to provide specific extension points rather than generic hooking facilities. We also -deliberately keep the extensions API as simple as possible, expanding it as we discover valid use -cases that cannot otherwise be implemented. - -At the time of writing: - -- Plugin support is merged as an experimental feature: real-world use cases and user feedback will -help us refine the UX to make the feature more user-friendly. -- There are no immediate plans to expand on the number of pluggable subsystems. -- Golang 1.5 might add language support for [plugins](https://docs.google.com/document/d/1nr-TQHw_er6GOQRsF6T43GGhFDelrAP0NqSS_00RgZQ) -which we consider supporting as an alternative to JSON/HTTP. - -## 1.4 Volume management - -Volumes are not first-class citizens in the Engine today: we would like better volume management, -similar to the way networks are managed in the new [CNM](https://github.com/docker/docker/issues/9983). - -## 1.5 Better API implementation - -The current Engine API is insufficiently typed, versioned, and ultimately hard to maintain. We -also suffer from the lack of a common implementation with [Swarm](https://github.com/docker/swarm). - -## 1.6 Checkpoint/restore - -Support for checkpoint/restore was [merged](https://github.com/docker/libcontainer/pull/479) in -[libcontainer](https://github.com/docker/libcontainer) and made available through [runC](https://runc.io): -we intend to take advantage of it in the Engine. - -# 2 Frozen features - -## 2.1 Docker exec - -We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a -*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. - -## 2.2 Dockerfile syntax - -The Dockerfile syntax as we know it is simple, and has proven successful in supporting all our -[official images](https://github.com/docker-library/official-images). Although this is *not* a -definitive move, we temporarily won't accept more patches to the Dockerfile syntax for several -reasons: - -- Long term impact of syntax changes is a sensitive matter that requires an amount of attention -that the volume of Engine codebase and activity today doesn't allow us to provide. -- Allowing the Builder to be implemented as a separate utility consuming the Engine's API will -open the door for many possibilities, such as offering alternate syntaxes or DSL for existing -languages without cluttering the Engine's codebase. -- A standalone Builder will also offer the opportunity for a better dedicated group of maintainers -to own the Dockerfile syntax and decide collectively on the direction to give it. -- Our experience with official images tends to show that no new instruction or syntax expansion is -*strictly* necessary for the majority of use cases, and although we are aware many things are still -lacking for many, we cannot make it a priority yet for the above reasons. - -Again, this is not about saying that the Dockerfile syntax is done, it's about making choices about -what we want to do first! 
- -## 2.3 Remote Registry Operations - -A large amount of work is ongoing in the area of image distribution and -provenance. This includes moving to the V2 Registry API and heavily -refactoring the code that powers these features. The desired result is more -secure, reliable and easier to use image distribution. - -Part of the problem with this part of the code base is the lack of a stable -and flexible interface. If new features are added that access the registry -without solidifying these interfaces, achieving feature parity will continue -to be elusive. While we get a handle on this situation, we are imposing a -moratorium on new code that accesses the Registry API in commands that don't -already make remote calls. - -Currently, only the following commands cause interaction with a remote -registry: - -- push -- pull -- run -- build -- search -- login - -In the interest of stabilizing the registry access model during this ongoing -work, we are not accepting additions to other commands that will cause remote -interaction with the Registry API. This moratorium will lift when the goals of -the distribution project have been met. diff --git a/vendor/github.com/docker/docker/VERSION b/vendor/github.com/docker/docker/VERSION deleted file mode 100644 index a01185b4..00000000 --- a/vendor/github.com/docker/docker/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.10.0-dev diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 00000000..453f61a1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,5 @@ +This directory contains code pertaining to the Docker API: + + - Used by the docker client when communicating with the docker daemon + + - Used by third party tools wishing to interface with the docker daemon diff --git a/vendor/github.com/docker/docker/api/client/attach.go b/vendor/github.com/docker/docker/api/client/attach.go new file mode 100644 index 00000000..e89644d6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/attach.go @@ -0,0 +1,109 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/signal" + "github.com/docker/engine-api/types" +) + +// CmdAttach attaches to a running container. 
+// +// Usage: docker attach [OPTIONS] CONTAINER +func (cli *DockerCli) CmdAttach(args ...string) error { + cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, Cli.DockerCommands["attach"].Description, true) + noStdin := cmd.Bool([]string{"-no-stdin"}, false, "Do not attach STDIN") + proxy := cmd.Bool([]string{"-sig-proxy"}, true, "Proxy all received signals to the process") + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + if !c.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if c.State.Paused { + return fmt.Errorf("You cannot attach to a paused container, unpause it first") + } + + if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil { + return err + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: cmd.Arg(0), + Stream: true, + Stdin: !*noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: cli.configFile.DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = cli.in + } + + if *proxy && !c.Config.Tty { + sigc := cli.forwardAllSignals(options.ContainerID) + defer signal.StopCatch(sigc) + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + + if c.Config.Tty && cli.isTerminalOut { + height, width := cli.getTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + cli.resizeTtyTo(cmd.Arg(0), height+1, width+1, false) + + // After the above resizing occurs, the call to monitorTtySize below will handle resetting back + // to the actual size. 
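+		// (Concretely, assuming an 80x24 terminal: the resize above briefly
+		// advertises 81x25 to the container, and the monitorTtySize call below
+		// then restores the real 80x24 size reported by the local terminal.)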
+ if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + + if err := cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp); err != nil { + return err + } + + _, status, err := getExitCode(cli, options.ContainerID) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/build.go b/vendor/github.com/docker/docker/api/client/build.go new file mode 100644 index 00000000..cdba3fa5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/build.go @@ -0,0 +1,321 @@ +package client + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + + "golang.org/x/net/context" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" +) + +type translatorFunc func(reference.NamedTagged) (reference.Canonical, error) + +// CmdBuild builds a new image from the source code at a given path. +// +// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN. 
+// +// Usage: docker build [OPTIONS] PATH | URL | - +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true) + flTags := opts.NewListOpts(validateTag) + cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success") + noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") + pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") + dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flBuildArg := opts.NewListOpts(runconfigopts.ValidateEnv) + cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables") + isolation := cmd.String([]string{"-isolation"}, "", "Container isolation technology") + + flLabels := opts.NewListOpts(nil) + cmd.Var(&flLabels, []string{"-label"}, "Set metadata for an image") + + ulimits := make(map[string]*units.Ulimit) + flUlimits := runconfigopts.NewUlimitOpt(&ulimits) + cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") + + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + var ( + ctx io.ReadCloser + err error + ) + + specifiedContext := cmd.Arg(0) + + var ( + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + progBuff = cli.out + buildBuff = cli.out + if *suppressOutput { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + + switch { + case specifiedContext == "-": + ctx, relDockerfile, err = builder.GetContextFromReader(cli.in, *dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, *dockerfileName) + case urlutil.IsURL(specifiedContext): + ctx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, *dockerfileName) + default: + contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, *dockerfileName) + } + + if err != nil { + if *suppressOutput && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(cli.err, progBuff) + } + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + 
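+		// (tempDir is set only by the git-URL branch of the switch above;
+		// the checked-out tree becomes the build context, and the deferred
+		// RemoveAll cleans it up when CmdBuild returns.)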
contextDir = tempDir + } + + if ctx == nil { + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return err + } + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return err + } + } + + if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil { + return fmt.Errorf("Error checking context: '%s'.", err) + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + ctx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return err + } + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) + + var body io.Reader = progress.NewProgressReader(ctx, progressOutput, 0, "", "Sending build context to Docker daemon") + + var memory int64 + if *flMemoryString != "" { + parsedMemory, err := units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + memory = parsedMemory + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + memorySwap = parsedMemorySwap + } + } + + var shmSize int64 + if *flShmSize != "" { + shmSize, err = units.RAMInBytes(*flShmSize) + if err != nil { + return err + } + } + + options := types.ImageBuildOptions{ + Context: body, + Memory: memory, + MemorySwap: memorySwap, + Tags: flTags.GetAll(), + SuppressOutput: *suppressOutput, + NoCache: *noCache, + Remove: *rm, + ForceRemove: *forceRm, + PullParent: *pull, + Isolation: container.Isolation(*isolation), + CPUSetCPUs: *flCPUSetCpus, + CPUSetMems: *flCPUSetMems, + CPUShares: *flCPUShares, + CPUQuota: *flCPUQuota, + CPUPeriod: *flCPUPeriod, + CgroupParent: *flCgroupParent, + Dockerfile: relDockerfile, + ShmSize: shmSize, + Ulimits: flUlimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()), + AuthConfigs: cli.retrieveAuthConfigs(), + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + response, err := cli.client.ImageBuild(context.Background(), options) + if err != nil { + return err + } + defer response.Body.Close() + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if *suppressOutput { + fmt.Fprintf(cli.err, 
"%s%s", progBuff, buildBuff) + } + return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" { + fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if *suppressOutput { + fmt.Fprintf(cli.out, "%s", buildBuff) + } + + return nil +} + +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + var content io.Reader = tarReader + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/vendor/github.com/docker/docker/api/client/cli.go b/vendor/github.com/docker/docker/api/client/cli.go new file mode 100644 index 00000000..6c673da4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/cli.go @@ -0,0 +1,215 @@ +package client + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/credentials" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/client" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + // initializing closure + init func() error + + // configFile has the client configuration file + configFile *cliconfig.ConfigFile + // in holds the input stream and closer (io.ReadCloser) for the client. 
+ in io.ReadCloser + // out holds the output stream (io.Writer) for the client. + out io.Writer + // err holds the error stream (io.Writer) for the client. + err io.Writer + // keyFile holds the key file as a string. + keyFile string + // inFd holds the file descriptor of the client's STDIN (if valid). + inFd uintptr + // outFd holds the file descriptor of the client's STDOUT (if valid). + outFd uintptr + // isTerminalIn indicates whether the client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut indicates whether the client's STDOUT is a TTY + isTerminalOut bool + // client is the http client that performs all API operations + client client.APIClient + // state holds the terminal state + state *term.State +} + +// Initialize calls the init function that will set up the configuration for the client +// such as the TLS, tcp and other parameters used to run the client. +func (cli *DockerCli) Initialize() error { + if cli.init == nil { + return nil + } + return cli.init() +} + +// CheckTtyInput checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + +// PsFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. +func (cli *DockerCli) PsFormat() string { + return cli.configFile.PsFormat +} + +// ImagesFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. +func (cli *DockerCli) ImagesFormat() string { + return cli.configFile.ImagesFormat +} + +func (cli *DockerCli) setRawTerminal() error { + if cli.isTerminalIn && os.Getenv("NORAW") == "" { + state, err := term.SetRawTerminal(cli.inFd) + if err != nil { + return err + } + cli.state = state + } + return nil +} + +func (cli *DockerCli) restoreTerminal(in io.Closer) error { + if cli.state != nil { + term.RestoreTerminal(cli.inFd, cli.state) + } + // WARNING: DO NOT REMOVE THE OS CHECK !!! + // For some reason this Close call blocks on darwin. + // As the client exits right after, simply discard the close + // until we find a better solution. + if in != nil && runtime.GOOS != "darwin" { + return in.Close() + } + return nil +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config +// is set the client scheme will be set to https. +// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035).
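+// +// A minimal usage sketch (hypothetical; assumes clientFlags is an already-populated *cli.ClientFlags): +// dockerCli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, clientFlags) +// if err := dockerCli.Initialize(); err != nil { /* handle the error */ }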
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli { + cli := &DockerCli{ + in: in, + out: out, + err: err, + keyFile: clientFlags.Common.TrustKey, + } + + cli.init = func() error { + clientFlags.PostParse() + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + cli.configFile = configFile + + host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + customHeaders := cli.configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = clientUserAgent() + + verStr := api.DefaultVersion.String() + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + client, err := client.NewClient(host, verStr, httpClient, customHeaders) + if err != nil { + return err + } + cli.client = client + + if cli.in != nil { + cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) + } + if cli.out != nil { + cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) + } + + return nil + } + + return cli +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = opts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. + return nil, nil + } + + config, err := tlsconfig.Client(*tlsOptions) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + }, nil +} + +func clientUserAgent() string { + return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/docker/docker/api/client/client.go b/vendor/github.com/docker/docker/api/client/client.go new file mode 100644 index 00000000..4cfce5f6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/client.go @@ -0,0 +1,5 @@ +// Package client provides a command-line interface for Docker. +// +// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. +// See https://docs.docker.com/installation/ for instructions on installing Docker. 
+package client diff --git a/vendor/github.com/docker/docker/api/client/commit.go b/vendor/github.com/docker/docker/api/client/commit.go new file mode 100644 index 00000000..6bec4297 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/commit.go @@ -0,0 +1,85 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// CmdCommit creates a new image from a container's changes. +// +// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true) + flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")") + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. + flConfig := cmd.String([]string{"#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + cmd.Require(flag.Max, 2) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + name = cmd.Arg(0) + repositoryAndTag = cmd.Arg(1) + repositoryName string + tag string + ) + + //Check if the given image name can be resolved + if repositoryAndTag != "" { + ref, err := reference.ParseNamed(repositoryAndTag) + if err != nil { + return err + } + + repositoryName = ref.Name() + + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot commit to digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + } + + var config *container.Config + if *flConfig != "" { + config = &container.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + + options := types.ContainerCommitOptions{ + ContainerID: name, + RepositoryName: repositoryName, + Tag: tag, + Comment: *flComment, + Author: *flAuthor, + Changes: flChanges.GetAll(), + Pause: *flPause, + Config: config, + } + + response, err := cli.client.ContainerCommit(context.Background(), options) + if err != nil { + return err + } + + fmt.Fprintln(cli.out, response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/cp.go b/vendor/github.com/docker/docker/api/client/cp.go new file mode 100644 index 00000000..61005602 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/cp.go @@ -0,0 +1,298 @@ +package client + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/system" + "github.com/docker/engine-api/types" +) + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// CmdCp copies files/folders to or from a path in a container.
+// +// When copying from a container, if DEST_PATH is '-' the data is written as a +// tar archive file to STDOUT. +// +// When copying to a container, if SRC_PATH is '-' the data is read as a tar +// archive file from STDIN, and the destination CONTAINER:DEST_PATH must specify +// a directory. +// +// Usage: +// docker cp CONTAINER:SRC_PATH DEST_PATH|- +// docker cp SRC_PATH|- CONTAINER:DEST_PATH +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := Cli.Subcmd( + "cp", + []string{"CONTAINER:SRC_PATH DEST_PATH|-", "SRC_PATH|- CONTAINER:DEST_PATH"}, + strings.Join([]string{ + Cli.DockerCommands["cp"].Description, + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + true, + ) + + followLink := cmd.Bool([]string{"L", "-follow-link"}, false, "Always follow symbolic link in SRC_PATH") + + cmd.Require(flag.Exact, 2) + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("source can not be empty") + } + if cmd.Arg(1) == "" { + return fmt.Errorf("destination can not be empty") + } + + srcContainer, srcPath := splitCpArg(cmd.Arg(0)) + dstContainer, dstPath := splitCpArg(cmd.Arg(1)) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: *followLink, + } + + switch direction { + case fromContainer: + return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately preceded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`.
+ return "", arg + } + + return parts[0], parts[1] +} + +func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) { + return cli.client.ContainerStatPath(context.Background(), containerName, path) +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + // if the client requests to follow symlinks, we must determine the target file to be copied + var rebaseName string + if cpParam.followLink { + srcStat, err := cli.statContainerPath(srcContainer, srcPath) + + // If the source is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := cli.client.CopyFromContainer(context.Background(), srcContainer, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, content) + + return err + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := cli.statContainerPath(dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = cli.statContainerPath(dstContainer, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed.
If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + ContainerID: dstContainer, + Path: resolvedDstPath, + Content: content, + AllowOverwriteDirWithFile: false, + } + + return cli.client.CopyToContainer(context.Background(), options) +} diff --git a/vendor/github.com/docker/docker/api/client/create.go b/vendor/github.com/docker/docker/api/client/create.go new file mode 100644 index 00000000..176bd8e9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/create.go @@ -0,0 +1,164 @@ +package client + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + var tag string + switch x := reference.WithDefaultTag(ref).(type) { + case reference.Canonical: + tag = x.Digest().String() + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(repoInfo.Index) + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := 
types.ImageCreateOptions{ + Parent: ref.Name(), + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImageCreate(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, out, cli.outFd, cli.isTerminalOut, nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + _, ref, err := reference.ParseIDOrReference(config.Image) + if err != nil { + return nil, err + } + if ref != nil { + ref = reference.WithDefaultTag(ref) + } + + //create the container + response, err := cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + + //if image not found try to pull it + if err != nil { + if client.IsErrImageNotFound(err) && ref != nil { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String()) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + // Retry + var retryErr error + response, retryErr = cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} + +// CmdCreate creates a new container from a given image. +// +// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] 
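+// +// For example (a hypothetical invocation): +// docker create --name web -p 8080:80 nginx:latest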
+func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true) + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + if config.Image == "" { + cmd.Usage() + return nil + } + response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/diff.go b/vendor/github.com/docker/docker/api/client/diff.go new file mode 100644 index 00000000..e17768fd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/diff.go @@ -0,0 +1,49 @@ +package client + +import ( + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdDiff shows changes on a container's filesystem. +// +// Each changed file is printed on a separate line, prefixed with a single +// character that indicates the status of the file: C (modified), A (added), +// or D (deleted). +// +// Usage: docker diff CONTAINER +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("Container name cannot be empty") + } + + changes, err := cli.client.ContainerDiff(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/events.go b/vendor/github.com/docker/docker/api/client/events.go new file mode 100644 index 00000000..d2408c19 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/events.go @@ -0,0 +1,146 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + eventtypes "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// CmdEvents prints a live stream of real time events from the server. 
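+// +// For example (hypothetical invocations): +// docker events --since 2016-06-01 +// docker events --filter 'event=start' --filter 'container=web'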
+// +// Usage: docker events [OPTIONS] +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true) + since := cmd.String([]string{"-since"}, "", "Show all events created since timestamp") + until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + eventFilterArgs := filters.NewArgs() + + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + var err error + eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) + if err != nil { + return err + } + } + + options := types.EventsOptions{ + Since: *since, + Until: *until, + Filters: eventFilterArgs, + } + + responseBody, err := cli.client.Events(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return streamEvents(responseBody, cli.out) +} + +// streamEvents decodes and prints the incoming events to the provided output. +func streamEvents(input io.Reader, output io.Writer) error { + return decodeEvents(input, func(event eventtypes.Message, err error) error { + if err != nil { + return err + } + printOutput(event, output) + return nil + }) +} + +type eventProcessor func(event eventtypes.Message, err error) error + +func decodeEvents(input io.Reader, ep eventProcessor) error { + dec := json.NewDecoder(input) + for { + var event eventtypes.Message + err := dec.Decode(&event) + if err != nil && err == io.EOF { + break + } + + if procErr := ep(event, err); procErr != nil { + return procErr + } + } + return nil +} + +// printOutput prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func printOutput(event eventtypes.Message, output io.Writer) { + if event.TimeNano != 0 { + fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + + fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(output, "\n") +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan.
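+// +// A minimal sketch (hypothetical usage; eventCh would typically be fed by a decodeEvents loop): +// eh := &eventHandler{handlers: map[string]func(eventtypes.Message){}} +// eh.Handle("start", func(e eventtypes.Message) { logrus.Debugf("started: %s", e.Actor.ID) }) +// go eh.Watch(eventCh) +// close(eventCh) // stops the watch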
+func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/docker/api/client/exec.go b/vendor/github.com/docker/docker/api/client/exec.go new file mode 100644 index 00000000..520c3a38 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/exec.go @@ -0,0 +1,166 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/engine-api/types" +) + +// CmdExec runs a command in a running container. +// +// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] +func (cli *DockerCli) CmdExec(args ...string) error { + cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + + execConfig, err := ParseExec(cmd, args) + // just in case the ParseExec does not exit + if execConfig.Container == "" || err != nil { + return Cli.StatusError{StatusCode: 1} + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = cli.configFile.DetachKeys + + response, err := cli.client.ContainerExecCreate(context.Background(), *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintf(cli.out, "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if err := cli.client.ContainerExecStart(context.Background(), execID, execStartCheck); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) + return nil + } + + // Interactive exec requested. 
+ var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = cli.in + } + if execConfig.AttachStdout { + out = cli.out + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + resp, err := cli.client.ContainerExecAttach(context.Background(), execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + if in != nil && execConfig.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(execConfig.Tty, in, out, stderr, resp) + }) + + if execConfig.Tty && cli.isTerminalIn { + if err := cli.monitorTtySize(execID, true); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} + +// ParseExec parses the specified args for the specified command and generates +// an ExecConfig from it. +// If the minimal number of specified args is not right or if specified args are +// not valid, it will return an error. +func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { + var ( + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])") + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") + execCmd []string + container string + ) + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return nil, err + } + container = cmd.Arg(0) + parsedArgs := cmd.Args() + execCmd = parsedArgs[1:] + + execConfig := &types.ExecConfig{ + User: *flUser, + Privileged: *flPrivileged, + Tty: *flTty, + Cmd: execCmd, + Container: container, + Detach: *flDetach, + } + + // If -d is not set, attach to everything by default + if !*flDetach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if *flStdin { + execConfig.AttachStdin = true + } + } + + return execConfig, nil +} diff --git a/vendor/github.com/docker/docker/api/client/export.go b/vendor/github.com/docker/docker/api/client/export.go new file mode 100644 index 00000000..a1d3ebe7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/export.go @@ -0,0 +1,42 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdExport exports a filesystem as a tar archive. +// +// The tar archive is streamed to STDOUT by default or written to a file. +// +// Usage: docker export [OPTIONS] CONTAINER +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal.
Use the -o flag or redirect.") + } + + responseBody, err := cli.client.ContainerExport(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + defer responseBody.Close() + + if *outfile == "" { + _, err := io.Copy(cli.out, responseBody) + return err + } + + return copyToFile(*outfile, responseBody) + +} diff --git a/vendor/github.com/docker/docker/api/client/formatter/custom.go b/vendor/github.com/docker/docker/api/client/formatter/custom.go new file mode 100644 index 00000000..2bb26a3d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/formatter/custom.go @@ -0,0 +1,242 @@ +package formatter + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/engine-api/types" + "github.com/docker/go-units" +) + +const ( + tableKey = "table" + + containerIDHeader = "CONTAINER ID" + imageHeader = "IMAGE" + namesHeader = "NAMES" + commandHeader = "COMMAND" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" + mountsHeader = "MOUNTS" +) + +type containerContext struct { + baseSubContext + trunc bool + c types.Container +} + +func (c *containerContext) ID() string { + c.addHeader(containerIDHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.addHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.addHeader(imageHeader) + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.addHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Truncate(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.addHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.addHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.addHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.addHeader(sizeHeader) + srw := units.HumanSize(float64(c.c.SizeRw)) + sv := units.HumanSize(float64(c.c.SizeRootFs)) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.addHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := 
strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.addHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + c.addHeader(mountsHeader) + + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = stringutils.Truncate(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +type imageContext struct { + baseSubContext + trunc bool + i types.Image + repo string + tag string + digest string +} + +func (c *imageContext) ID() string { + c.addHeader(imageIDHeader) + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + c.addHeader(repositoryHeader) + return c.repo +} + +func (c *imageContext) Tag() string { + c.addHeader(tagHeader) + return c.tag +} + +func (c *imageContext) Digest() string { + c.addHeader(digestHeader) + return c.digest +} + +func (c *imageContext) CreatedSince() string { + c.addHeader(createdSinceHeader) + createdAt := time.Unix(int64(c.i.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *imageContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.i.Created), 0).String() +} + +func (c *imageContext) Size() string { + c.addHeader(sizeHeader) + return units.HumanSize(float64(c.i.Size)) +} + +type subContext interface { + fullHeader() string + addHeader(header string) +} + +type baseSubContext struct { + header []string +} + +func (c *baseSubContext) fullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +func (c *baseSubContext) addHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func stripNamePrefix(ss []string) []string { + for i, s := range ss { + ss[i] = s[1:] + } + + return ss +} diff --git a/vendor/github.com/docker/docker/api/client/formatter/formatter.go b/vendor/github.com/docker/docker/api/client/formatter/formatter.go new file mode 100644 index 00000000..bc3d50c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/formatter/formatter.go @@ -0,0 +1,255 @@ +package formatter + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/reference" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/types" +) + +const ( + tableFormatKey = "table" + rawFormatKey = "raw" + + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultQuietFormat = "{{.ID}}" +) + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format string + // Quiet when set to true will simply print minimal information. + Quiet bool + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
+ Trunc bool + + // internal element + table bool + finalFormat string + header string + buffer *bytes.Buffer +} + +func (c *Context) preformat() { + c.finalFormat = c.Format + + if strings.HasPrefix(c.Format, tableKey) { + c.table = true + c.finalFormat = c.finalFormat[len(tableKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) + c.buffer.WriteTo(c.Output) + } + return tmpl, err +} + +func (c *Context) postformat(tmpl *template.Template, subContext subContext) { + if c.table { + if len(c.header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + tmpl.Execute(bytes.NewBufferString(""), subContext) + c.header = subContext.fullHeader() + } + + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(c.header)) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) + c.buffer.WriteTo(c.Output) + return err + } + if c.table && len(c.header) == 0 { + c.header = subContext.fullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// ContainerContext contains container specific information required by the formatter, encapsulating a Context struct. +type ContainerContext struct { + Context + // Size when set to true will display the size of the output. + Size bool + // Containers + Containers []types.Container +} + +// ImageContext contains image specific information required by the formatter, encapsulating a Context struct.
+type ImageContext struct { + Context + Digest bool + // Images + Images []types.Image +} + +func (ctx ContainerContext) Write() { + switch ctx.Format { + case tableFormatKey: + ctx.Format = defaultContainerTableFormat + if ctx.Quiet { + ctx.Format = defaultQuietFormat + } + case rawFormatKey: + if ctx.Quiet { + ctx.Format = `container_id: {{.ID}}` + } else { + ctx.Format = `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{.Status}} +names: {{.Names}} +labels: {{.Labels}} +ports: {{.Ports}} +` + if ctx.Size { + ctx.Format += `size: {{.Size}} +` + } + } + } + + ctx.buffer = bytes.NewBufferString("") + ctx.preformat() + if ctx.table && ctx.Size { + ctx.finalFormat += "\t{{.Size}}" + } + + tmpl, err := ctx.parseFormat() + if err != nil { + return + } + + for _, container := range ctx.Containers { + containerCtx := &containerContext{ + trunc: ctx.Trunc, + c: container, + } + err = ctx.contextFormat(tmpl, containerCtx) + if err != nil { + return + } + } + + ctx.postformat(tmpl, &containerContext{}) +} + +func (ctx ImageContext) Write() { + switch ctx.Format { + case tableFormatKey: + ctx.Format = defaultImageTableFormat + if ctx.Digest { + ctx.Format = defaultImageTableFormatWithDigest + } + if ctx.Quiet { + ctx.Format = defaultQuietFormat + } + case rawFormatKey: + if ctx.Quiet { + ctx.Format = `image_id: {{.ID}}` + } else { + if ctx.Digest { + ctx.Format = `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } else { + ctx.Format = `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + } + + ctx.buffer = bytes.NewBufferString("") + ctx.preformat() + if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") { + ctx.finalFormat += "\t{{.Digest}}" + } + + tmpl, err := ctx.parseFormat() + if err != nil { + return + } + + for _, image := range ctx.Images { + + repoTags := image.RepoTags + repoDigests := image.RepoDigests + + if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" { + // dangling image - clear out either repoTags or repoDigests so we only show it once below + repoDigests = []string{} + } + // combine the tags and digests lists + tagsAndDigests := append(repoTags, repoDigests...)
+ for _, repoAndRef := range tagsAndDigests { + repo := "" + tag := "" + digest := "" + + if !strings.HasPrefix(repoAndRef, "<none>") { + ref, err := reference.ParseNamed(repoAndRef) + if err != nil { + continue + } + repo = ref.Name() + + switch x := ref.(type) { + case reference.Canonical: + digest = x.Digest().String() + case reference.NamedTagged: + tag = x.Tag() + } + } + imageCtx := &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: digest, + } + err = ctx.contextFormat(tmpl, imageCtx) + if err != nil { + return + } + } + } + + ctx.postformat(tmpl, &imageContext{}) +} diff --git a/vendor/github.com/docker/docker/api/client/hijack.go b/vendor/github.com/docker/docker/api/client/hijack.go new file mode 100644 index 00000000..4c80fe1c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/hijack.go @@ -0,0 +1,56 @@ +package client + +import ( + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" +) + +func (cli *DockerCli) holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { + var err error + receiveStdout := make(chan error, 1) + if outputStream != nil || errorStream != nil { + go func() { + // When TTY is ON, use regular copy + if tty && outputStream != nil { + _, err = io.Copy(outputStream, resp.Reader) + } else { + _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) + } + logrus.Debugf("[hijack] End of stdout") + receiveStdout <- err + }() + } + + stdinDone := make(chan struct{}) + go func() { + if inputStream != nil { + io.Copy(resp.Conn, inputStream) + logrus.Debugf("[hijack] End of stdin") + } + + if err := resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + close(stdinDone) + }() + + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-stdinDone: + if outputStream != nil || errorStream != nil { + if err := <-receiveStdout; err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/history.go b/vendor/github.com/docker/docker/api/client/history.go new file mode 100644 index 00000000..25bb4157 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/history.go @@ -0,0 +1,76 @@ +package client + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-units" +) + +// CmdHistory shows the history of an image.
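+// +// For example (hypothetical invocations): +// docker history ubuntu:16.04 +// docker history --no-trunc --human=false myimage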
+// +// Usage: docker history [OPTIONS] IMAGE +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true) + human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + history, err := cli.client.ImageHistory(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + + if *quiet { + for _, entry := range history { + if *noTrunc { + fmt.Fprintf(w, "%s\n", entry.ID) + } else { + fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) + } + } + w.Flush() + return nil + } + + var imageID string + var createdBy string + var created string + var size string + + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + for _, entry := range history { + imageID = entry.ID + createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) + if !*noTrunc { + createdBy = stringutils.Truncate(createdBy, 45) + imageID = stringid.TruncateID(entry.ID) + } + + if *human { + created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" + size = units.HumanSize(float64(entry.Size)) + } else { + created = time.Unix(entry.Created, 0).Format(time.RFC3339) + size = strconv.FormatInt(entry.Size, 10) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/images.go b/vendor/github.com/docker/docker/api/client/images.go new file mode 100644 index 00000000..4840b63d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/images.go @@ -0,0 +1,81 @@ +package client + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/formatter" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. +// +// Usage: docker images [OPTIONS] [REPOSITORY] +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") + format := cmd.String([]string{"-format"}, "", "Pretty-print images using a Go template") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server.
+ imageFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + var matchName string + if cmd.NArg() == 1 { + matchName = cmd.Arg(0) + } + + options := types.ImageListOptions{ + MatchName: matchName, + All: *all, + Filters: imageFilterArgs, + } + + images, err := cli.client.ImageList(context.Background(), options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.ImagesFormat()) > 0 && !*quiet { + f = cli.ImagesFormat() + } else { + f = "table" + } + } + + imagesCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Digest: *showDigests, + Images: images, + } + + imagesCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/import.go b/vendor/github.com/docker/docker/api/client/import.go new file mode 100644 index 00000000..c96e1e97 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/import.go @@ -0,0 +1,82 @@ +package client + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. +// +// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to a local file relative to the docker client. If the URL is '-', then the tar file is read from STDIN. +// +// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true) + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + in io.Reader + tag string + src = cmd.Arg(0) + srcName = src + repository = cmd.Arg(1) + changes = flChanges.GetAll() + ) + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated.
Please use file|URL|- [REPOSITORY[:TAG]]\n") + tag = cmd.Arg(2) + } + + if repository != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(repository); err != nil { + return err + } + } + + if src == "-" { + in = cli.in + } else if !urlutil.IsURL(src) { + srcName = "-" + file, err := os.Open(src) + if err != nil { + return err + } + defer file.Close() + in = file + } + + options := types.ImageImportOptions{ + Source: in, + SourceName: srcName, + RepositoryName: repository, + Message: *message, + Tag: tag, + Changes: changes, + } + + responseBody, err := cli.client.ImageImport(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/vendor/github.com/docker/docker/api/client/info.go b/vendor/github.com/docker/docker/api/client/info.go new file mode 100644 index 00000000..29df605e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/info.go @@ -0,0 +1,155 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" + "github.com/docker/go-units" +) + +// CmdInfo displays system-wide information. +// +// Usage: docker info +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + info, err := cli.client.Info(context.Background()) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) + fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + + // print a warning if devicemapper is using a loopback file + if pair[0] == "Data loop file" { + fmt.Fprintln(cli.err, " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Either use `--storage-opt dm.thinpooldev` or use `--storage-opt dm.no_warn_on_loop_devices=true` to suppress this warning.") + } + } + + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Cgroup Driver: %s\n", info.CgroupDriver) + + fmt.Fprintf(cli.out, "Plugins: \n") + fmt.Fprintf(cli.out, " Volume:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(cli.out, "\n") + fmt.Fprintf(cli.out, " Network:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(cli.out, "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(cli.out, " Authorization:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(cli.out, "\n") + } + + ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) + ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType) + ioutils.FprintfIfNotEmpty(cli.out, "Architecture: %s\n", info.Architecture) + fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(cli.out, "Debug mode (client): %v\n", utils.IsDebugEnabled()) + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) + + if info.Debug { + fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) + } + + ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) + ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + } + fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) + } + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + if !info.MemoryLimit { + fmt.Fprintln(cli.err, "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(cli.err, "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(cli.err, "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(cli.err, "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(cli.err, "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(cli.err, "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(cli.err, "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-iptables is 
disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + if info.Labels != nil { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(cli.out, " %s\n", attribute) + } + } + + ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/inspect.go b/vendor/github.com/docker/docker/api/client/inspect.go new file mode 100644 index 00000000..2e97a5aa --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect.go @@ -0,0 +1,127 @@ +package client + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/inspect" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/client" +) + +// CmdInspect displays low-level information on one or more containers or images. +// +// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") + size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { + return fmt.Errorf("%q is not a valid value for --type", *inspectType) + } + + var elementSearcher inspectSearcher + switch *inspectType { + case "container": + elementSearcher = cli.inspectContainers(*size) + case "image": + elementSearcher = cli.inspectImages(*size) + default: + elementSearcher = cli.inspectAll(*size) + } + + return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher) +} + +func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + } +} + +func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + } +} + +func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + c, rawContainer, err := cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + if err != nil { + // Search for image with that id if a container doesn't exist. 
+ if client.IsErrContainerNotFound(err) { + i, rawImage, err := cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + if err != nil { + if client.IsErrImageNotFound(err) { + return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref) + } + return nil, nil, err + } + return i, rawImage, err + } + return nil, nil, err + } + return c, rawContainer, err + } +} + +type inspectSearcher func(ref string) (interface{}, []byte, error) + +func (cli *DockerCli) inspectElements(tmplStr string, references []string, searchByReference inspectSearcher) error { + elementInspector, err := cli.newInspectorWithTemplate(tmplStr) + if err != nil { + return Cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErr error + for _, ref := range references { + element, raw, err := searchByReference(ref) + if err != nil { + inspectErr = err + break + } + + if err := elementInspector.Inspect(element, raw); err != nil { + inspectErr = err + break + } + } + + if err := elementInspector.Flush(); err != nil { + cli.inspectErrorStatus(err) + } + + if status := cli.inspectErrorStatus(inspectErr); status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) inspectErrorStatus(err error) (status int) { + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + } + return +} + +func (cli *DockerCli) newInspectorWithTemplate(tmplStr string) (inspect.Inspector, error) { + elementInspector := inspect.NewIndentedInspector(cli.out) + if tmplStr != "" { + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, fmt.Errorf("Template parsing error: %s", err) + } + elementInspector = inspect.NewTemplateInspector(cli.out, tmpl) + } + return elementInspector, nil +} diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector.go b/vendor/github.com/docker/docker/api/client/inspect/inspector.go new file mode 100644 index 00000000..a1d16d47 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect/inspector.go @@ -0,0 +1,119 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/template" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return fmt.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement, err) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush write the result of inspecting all elements into the output stream. 
+func (i *TemplateInspector) Flush() error {
+	if i.buffer.Len() == 0 {
+		_, err := io.WriteString(i.outputStream, "\n")
+		return err
+	}
+	_, err := io.Copy(i.outputStream, i.buffer)
+	return err
+}
+
+// IndentedInspector uses a buffer to store the indented representation of an element.
+type IndentedInspector struct {
+	outputStream io.Writer
+	elements     []interface{}
+	rawElements  [][]byte
+}
+
+// NewIndentedInspector generates a new IndentedInspector.
+func NewIndentedInspector(outputStream io.Writer) Inspector {
+	return &IndentedInspector{
+		outputStream: outputStream,
+	}
+}
+
+// Inspect writes the raw element with an indented json format.
+func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error {
+	if rawElement != nil {
+		i.rawElements = append(i.rawElements, rawElement)
+	} else {
+		i.elements = append(i.elements, typedElement)
+	}
+	return nil
+}
+
+// Flush writes the result of inspecting all elements into the output stream.
+func (i *IndentedInspector) Flush() error {
+	if len(i.elements) == 0 && len(i.rawElements) == 0 {
+		_, err := io.WriteString(i.outputStream, "[]\n")
+		return err
+	}
+
+	var buffer io.Reader
+	if len(i.rawElements) > 0 {
+		bytesBuffer := new(bytes.Buffer)
+		bytesBuffer.WriteString("[")
+		for idx, r := range i.rawElements {
+			bytesBuffer.Write(r)
+			if idx < len(i.rawElements)-1 {
+				bytesBuffer.WriteString(",")
+			}
+		}
+		bytesBuffer.WriteString("]")
+		indented := new(bytes.Buffer)
+		if err := json.Indent(indented, bytesBuffer.Bytes(), "", "    "); err != nil {
+			return err
+		}
+		buffer = indented
+	} else {
+		b, err := json.MarshalIndent(i.elements, "", "    ")
+		if err != nil {
+			return err
+		}
+		buffer = bytes.NewReader(b)
+	}
+
+	if _, err := io.Copy(i.outputStream, buffer); err != nil {
+		return err
+	}
+	_, err := io.WriteString(i.outputStream, "\n")
+	return err
+}
diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go b/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go
new file mode 100644
index 00000000..39a0510c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go
@@ -0,0 +1,40 @@
+// +build !go1.5
+
+package inspect
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// tryRawInspectFallback executes the inspect template with a raw interface.
+// This allows docker cli to parse inspect structs injected with Swarm fields.
+// Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface.
+// It doesn't allow to modify this behavior either, sending <no value> messages to the output.
+// We assume that the template is invalid when there is a <no value>, if the template was valid
+// we'd get <nil> or "" values. In that case we fail with the original error raised executing the
+// template with the typed input.
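+//
+// Editor's example (hypothetical template, not upstream code): a template like
+// {{.Swarm.Nodes}} fails against a typed struct missing the field, but under
+// go1.4 it silently renders "<no value>" against the raw decoded map, which
+// is why the fallback below scans its output for that marker.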
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, originalErr error) error {
+	var raw interface{}
+	buffer := new(bytes.Buffer)
+	rdr := bytes.NewReader(rawElement)
+	dec := json.NewDecoder(rdr)
+
+	if rawErr := dec.Decode(&raw); rawErr != nil {
+		return fmt.Errorf("unable to read inspect data: %v", rawErr)
+	}
+
+	if rawErr := i.tmpl.Execute(buffer, raw); rawErr != nil {
+		return fmt.Errorf("Template parsing error: %v", rawErr)
+	}
+
+	if strings.Contains(buffer.String(), "<no value>") {
+		return fmt.Errorf("Template parsing error: %v", originalErr)
+	}
+
+	i.buffer.Write(buffer.Bytes())
+	i.buffer.WriteByte('\n')
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go b/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go
new file mode 100644
index 00000000..b098f415
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go
@@ -0,0 +1,29 @@
+// +build go1.5
+
+package inspect
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, _ error) error {
+	var raw interface{}
+	buffer := new(bytes.Buffer)
+	rdr := bytes.NewReader(rawElement)
+	dec := json.NewDecoder(rdr)
+
+	if rawErr := dec.Decode(&raw); rawErr != nil {
+		return fmt.Errorf("unable to read inspect data: %v", rawErr)
+	}
+
+	tmplMissingKey := i.tmpl.Option("missingkey=error")
+	if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil {
+		return fmt.Errorf("Template parsing error: %v", rawErr)
+	}
+
+	i.buffer.Write(buffer.Bytes())
+	i.buffer.WriteByte('\n')
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/api/client/kill.go b/vendor/github.com/docker/docker/api/client/kill.go
new file mode 100644
index 00000000..9841ba4d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/client/kill.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+	"fmt"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	Cli "github.com/docker/docker/cli"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdKill kills one or more running containers using SIGKILL or a specified signal.
+//
+// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdKill(args ...string) error {
+	cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true)
+	signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var errs []string
+	for _, name := range cmd.Args() {
+		if err := cli.client.ContainerKill(context.Background(), name, *signal); err != nil {
+			errs = append(errs, err.Error())
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("%s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/api/client/load.go b/vendor/github.com/docker/docker/api/client/load.go
new file mode 100644
index 00000000..820fdc0e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/client/load.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/net/context"
+
+	Cli "github.com/docker/docker/cli"
+	"github.com/docker/docker/pkg/jsonmessage"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdLoad loads an image from a tar archive.
+//
+// The tar archive is read from STDIN by default, or from a tar archive file.
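+//
+// Editor's example (invocation only, not upstream code): "docker load -i
+// images.tar" opens the archive instead of reading STDIN; when stdout is not
+// a terminal, --quiet is forced on before the stream is passed to ImageLoad.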
+// +// Usage: docker load [OPTIONS] +func (cli *DockerCli) CmdLoad(args ...string) error { + cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true) + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the load output") + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + var input io.Reader = cli.in + if *infile != "" { + file, err := os.Open(*infile) + if err != nil { + return err + } + defer file.Close() + input = file + } + if !cli.isTerminalOut { + *quiet = true + } + response, err := cli.client.ImageLoad(context.Background(), input, *quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesStream(response.Body, cli.out, cli.outFd, cli.isTerminalOut, nil) + } + + _, err = io.Copy(cli.out, response.Body) + return err +} diff --git a/vendor/github.com/docker/docker/api/client/login.go b/vendor/github.com/docker/docker/api/client/login.go new file mode 100644 index 00000000..a772348c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/login.go @@ -0,0 +1,177 @@ +package client + +import ( + "bufio" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/credentials" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/types" +) + +// CmdLogin logs in a user to a Docker registry service. +// +// If no server is specified, the user will be logged into or registered to the registry's index server. +// +// Usage: docker login SERVER +func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) + cmd.Require(flag.Max, 1) + + flUser := cmd.String([]string{"u", "-username"}, "", "Username") + flPassword := cmd.String([]string{"p", "-password"}, "", "Password") + + // Deprecated in 1.11: Should be removed in docker 1.13 + cmd.String([]string{"#e", "#-email"}, "", "Email") + + cmd.ParseFlags(args, true) + + // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.in = os.Stdin + } + + var serverAddress string + var isDefaultRegistry bool + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } else { + serverAddress = cli.electAuthServer() + isDefaultRegistry = true + } + + authConfig, err := cli.configureAuth(*flUser, *flPassword, serverAddress, isDefaultRegistry) + if err != nil { + return err + } + + response, err := cli.client.RegistryLogin(context.Background(), authConfig) + if err != nil { + return err + } + + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + if err := storeCredentials(cli.configFile, authConfig); err != nil { + return fmt.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(cli.out, response.Status) + } + return nil +} + +func (cli *DockerCli) promptWithDefault(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } +} + +func (cli *DockerCli) configureAuth(flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + authconfig, err := getCredentials(cli.configFile, serverAddress) + if err != nil { + return authconfig, err + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a defauly registry (docker hub), then display the following message. + fmt.Fprintln(cli.out, "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + cli.promptWithDefault("Username", authconfig.Username) + flUser = readInput(cli.in, cli.out) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + + if flUser == "" { + return authconfig, fmt.Errorf("Error: Non-null Username Required") + } + + if flPassword == "" { + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.inFd, oldState) + + flPassword = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.inFd, oldState) + if flPassword == "" { + return authconfig, fmt.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +// getCredentials loads the user credentials from a credentials store. +// The store is determined by the config file settings. +func getCredentials(c *cliconfig.ConfigFile, serverAddress string) (types.AuthConfig, error) { + s := loadCredentialsStore(c) + return s.Get(serverAddress) +} + +func getAllCredentials(c *cliconfig.ConfigFile) (map[string]types.AuthConfig, error) { + s := loadCredentialsStore(c) + return s.GetAll() +} + +// storeCredentials saves the user credentials in a credentials store. +// The store is determined by the config file settings. 
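+// Editor's note (not part of the upstream file): loadCredentialsStore below
+// switches on the "credsStore" key of the client config (typically
+// ~/.docker/config.json); e.g. {"credsStore": "osxkeychain"} selects a native
+// helper, while an absent key keeps the plain file store.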
+func storeCredentials(c *cliconfig.ConfigFile, auth types.AuthConfig) error { + s := loadCredentialsStore(c) + return s.Store(auth) +} + +// eraseCredentials removes the user credentials from a credentials store. +// The store is determined by the config file settings. +func eraseCredentials(c *cliconfig.ConfigFile, serverAddress string) error { + s := loadCredentialsStore(c) + return s.Erase(serverAddress) +} + +// loadCredentialsStore initializes a new credentials store based +// in the settings provided in the configuration file. +func loadCredentialsStore(c *cliconfig.ConfigFile) credentials.Store { + if c.CredentialsStore != "" { + return credentials.NewNativeStore(c) + } + return credentials.NewFileStore(c) +} diff --git a/vendor/github.com/docker/docker/api/client/logout.go b/vendor/github.com/docker/docker/api/client/logout.go new file mode 100644 index 00000000..b5ff59dd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/logout.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdLogout logs a user out from a Docker registry. +// +// If no server is specified, the user will be logged out from the registry's index server. +// +// Usage: docker logout [SERVER] +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + var serverAddress string + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } else { + serverAddress = cli.electAuthServer() + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + return nil + } + + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + if err := eraseCredentials(cli.configFile, serverAddress); err != nil { + fmt.Fprintf(cli.out, "WARNING: could not erase credentials: %v\n", err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/logs.go b/vendor/github.com/docker/docker/api/client/logs.go new file mode 100644 index 00000000..7cd5605e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/logs.go @@ -0,0 +1,65 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" +) + +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + +// CmdLogs fetches the logs of a given container. 
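+//
+// Editor's note (not part of the upstream file): retrieval is gated on the
+// validDrivers map above, so a container using, say, the syslog driver fails
+// fast with an explanatory error instead of returning an empty stream.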
+// +// docker logs [OPTIONS] CONTAINER +func (cli *DockerCli) CmdLogs(args ...string) error { + cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true) + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") + times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + + c, err := cli.client.ContainerInspect(context.Background(), name) + if err != nil { + return err + } + + if !validDrivers[c.HostConfig.LogConfig.Type] { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) + } + + options := types.ContainerLogsOptions{ + ContainerID: name, + ShowStdout: true, + ShowStderr: true, + Since: *since, + Timestamps: *times, + Follow: *follow, + Tail: *tail, + } + responseBody, err := cli.client.ContainerLogs(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + if c.Config.Tty { + _, err = io.Copy(cli.out, responseBody) + } else { + _, err = stdcopy.StdCopy(cli.out, cli.err, responseBody) + } + return err +} diff --git a/vendor/github.com/docker/docker/api/client/network.go b/vendor/github.com/docker/docker/api/client/network.go new file mode 100644 index 00000000..4bbc7154 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/network.go @@ -0,0 +1,392 @@ +package client + +import ( + "fmt" + "net" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/network" +) + +// CmdNetwork is the parent subcommand for all network commands +// +// Usage: docker network [OPTIONS] +func (cli *DockerCli) CmdNetwork(args ...string) error { + cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, networkUsage(), false) + cmd.Require(flag.Min, 1) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdNetworkCreate creates a new network with a given name +// +// Usage: docker network create [OPTIONS] +func (cli *DockerCli) CmdNetworkCreate(args ...string) error { + cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false) + flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network") + flOpts := opts.NewMapOpts(nil, nil) + + flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver") + flIpamSubnet := opts.NewListOpts(nil) + flIpamIPRange := opts.NewListOpts(nil) + flIpamGateway := opts.NewListOpts(nil) + flIpamAux := opts.NewMapOpts(nil, nil) + flIpamOpt := opts.NewMapOpts(nil, nil) + flLabels := opts.NewListOpts(nil) + + cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment") + cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range") + cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet") + cmd.Var(flIpamAux, 
[]string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver") + cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options") + cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options") + cmd.Var(&flLabels, []string{"-label"}, "set metadata on a network") + + flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network") + flIPv6 := cmd.Bool([]string{"-ipv6"}, false, "enable IPv6 networking") + + cmd.Require(flag.Exact, 1) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + // Set the default driver to "" if the user didn't set the value. + // That way we can know whether it was user input or not. + driver := *flDriver + if !cmd.IsSet("-driver") && !cmd.IsSet("d") { + driver = "" + } + + ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Name: cmd.Arg(0), + Driver: driver, + IPAM: network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()}, + Options: flOpts.GetAll(), + CheckDuplicate: true, + Internal: *flInternal, + EnableIPv6: *flIPv6, + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + resp, err := cli.client.NetworkCreate(context.Background(), nc) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", resp.ID) + return nil +} + +// CmdNetworkRm deletes one or more networks +// +// Usage: docker network rm NETWORK-NAME|NETWORK-ID [NETWORK-NAME|NETWORK-ID...] +func (cli *DockerCli) CmdNetworkRm(args ...string) error { + cmd := Cli.Subcmd("network rm", []string{"NETWORK [NETWORK...]"}, "Deletes one or more networks", false) + cmd.Require(flag.Min, 1) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + status := 0 + for _, net := range cmd.Args() { + if err := cli.client.NetworkRemove(context.Background(), net); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +// CmdNetworkConnect connects a container to a network +// +// Usage: docker network connect [OPTIONS] +func (cli *DockerCli) CmdNetworkConnect(args ...string) error { + cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false) + flIPAddress := cmd.String([]string{"-ip"}, "", "IP Address") + flIPv6Address := cmd.String([]string{"-ip6"}, "", "IPv6 Address") + flLinks := opts.NewListOpts(runconfigopts.ValidateLink) + cmd.Var(&flLinks, []string{"-link"}, "Add link to another container") + flAliases := opts.NewListOpts(nil) + cmd.Var(&flAliases, []string{"-alias"}, "Add network-scoped alias for the container") + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: *flIPAddress, + IPv6Address: *flIPv6Address, + }, + Links: flLinks.GetAll(), + Aliases: flAliases.GetAll(), + } + return cli.client.NetworkConnect(context.Background(), cmd.Arg(0), cmd.Arg(1), epConfig) +} + +// CmdNetworkDisconnect disconnects a container from a network +// +// Usage: docker network disconnect +func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error { + cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false) + force := 
cmd.Bool([]string{"f", "-force"}, false, "Force the container to disconnect from a network") + cmd.Require(flag.Exact, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + return cli.client.NetworkDisconnect(context.Background(), cmd.Arg(0), cmd.Arg(1), *force) +} + +// CmdNetworkLs lists all the networks managed by docker daemon +// +// Usage: docker network ls [OPTIONS] +func (cli *DockerCli) CmdNetworkLs(args ...string) error { + cmd := Cli.Subcmd("network ls", nil, "Lists networks", true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + // Consolidate all filter flags, and sanity check them early. + // They'll get process after get response from server. + netFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil { + return err + } + } + + options := types.NetworkListOptions{ + Filters: netFilterArgs, + } + + networkResources, err := cli.client.NetworkList(context.Background(), options) + if err != nil { + return err + } + + wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + + // unless quiet (-q) is specified, print field titles + if !*quiet { + fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER") + } + sort.Sort(byNetworkName(networkResources)) + for _, networkResource := range networkResources { + ID := networkResource.ID + netName := networkResource.Name + if !*noTrunc { + ID = stringid.TruncateID(ID) + } + if *quiet { + fmt.Fprintln(wr, ID) + continue + } + driver := networkResource.Driver + fmt.Fprintf(wr, "%s\t%s\t%s\t", + ID, + netName, + driver) + fmt.Fprint(wr, "\n") + } + wr.Flush() + return nil +} + +type byNetworkName []types.NetworkResource + +func (r byNetworkName) Len() int { return len(r) } +func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } + +// CmdNetworkInspect inspects the network object for more details +// +// Usage: docker network inspect [OPTIONS] [NETWORK...] +func (cli *DockerCli) CmdNetworkInspect(args ...string) error { + cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on one or more networks", false) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Min, 1) + + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + inspectSearcher := func(name string) (interface{}, []byte, error) { + i, err := cli.client.NetworkInspect(context.Background(), name) + return i, nil, err + } + + return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to correlate the various related parameters and consolidate them. +// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into +// structured ipam data. 
+func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, fmt.Errorf("Invalid subnet %s : %v", s, err) + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, fmt.Errorf("Invalid cidr %s : %v", data, err) + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} + +func networkUsage() string { + networkCommands := map[string]string{ + "create": "Create a network", + "connect": "Connect container to a network", + "disconnect": "Disconnect container from a network", + "inspect": "Display detailed network information", + "ls": "List all networks", + "rm": "Remove a network", + } + + help := "Commands:\n" + + for cmd, description := range networkCommands { + help += fmt.Sprintf(" %-25.25s%s\n", cmd, description) + } + + help += fmt.Sprintf("\nRun 'docker network COMMAND --help' for more information on a command.") + return help +} diff --git a/vendor/github.com/docker/docker/api/client/pause.go b/vendor/github.com/docker/docker/api/client/pause.go new file mode 100644 index 00000000..ffba1c9a --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/pause.go @@ -0,0 +1,34 @@ +package client + +import 
( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdPause pauses all processes within one or more containers. +// +// Usage: docker pause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerPause(context.Background(), name); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/port.go b/vendor/github.com/docker/docker/api/client/port.go new file mode 100644 index 00000000..9b545f56 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/port.go @@ -0,0 +1,61 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/nat" +) + +// CmdPort lists port mappings for a container. +// If a private port is specified, it also shows the public-facing port that is NATed to the private port. +// +// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/ps.go b/vendor/github.com/docker/docker/api/client/ps.go new file mode 100644 index 00000000..3627ffc1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/ps.go @@ -0,0 +1,89 @@ +package client + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/formatter" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdPs outputs a list of Docker containers. 
+// +// Usage: docker ps [OPTIONS] +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.NewArgs() + + cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true) + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noTrunc = cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container (includes all states)") + since = cmd.String([]string{"#-since"}, "", "Show containers created since Id or Name (includes all states)") + before = cmd.String([]string{"#-before"}, "", "Only show containers created before Id or Name") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers (includes all states)") + format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") + flFilter = opts.NewListOpts(nil) + ) + cmd.Require(flag.Exact, 0) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.ParseFlags(args, true) + if *last == -1 && *nLatest { + *last = 1 + } + + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { + return err + } + } + + options := types.ContainerListOptions{ + All: *all, + Limit: *last, + Since: *since, + Before: *before, + Size: *size, + Filter: psFilterArgs, + } + + containers, err := cli.client.ContainerList(context.Background(), options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.PsFormat()) > 0 && !*quiet { + f = cli.PsFormat() + } else { + f = "table" + } + } + + psCtx := formatter.ContainerContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Size: *size, + Containers: containers, + } + + psCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/pull.go b/vendor/github.com/docker/docker/api/client/pull.go new file mode 100644 index 00000000..eb79d38b --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/pull.go @@ -0,0 +1,73 @@ +package client + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPull pulls an image or a repository from the registry. 
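+//
+// Editor's note (not part of the upstream file): "docker pull busybox" is a
+// name-only reference, so WithDefaultTag rewrites it to busybox:latest and
+// the CLI prints "Using default tag: latest" before resolving the repository.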
+// +// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true) + allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + remote := cmd.Arg(0) + + distributionRef, err := reference.ParseNamed(remote) + if err != nil { + return err + } + if *allTags && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !*allTags && reference.IsNameOnly(distributionRef) { + distributionRef = reference.WithDefaultTag(distributionRef) + fmt.Fprintf(cli.out, "Using default tag: %s\n", reference.DefaultTag) + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(repoInfo.Index) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") + + return cli.imagePullPrivileged(authConfig, distributionRef.String(), "", requestPrivilege) +} + +func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) error { + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImagePull(context.Background(), options, requestPrivilege) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/vendor/github.com/docker/docker/api/client/push.go b/vendor/github.com/docker/docker/api/client/push.go new file mode 100644 index 00000000..5f51ec27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/push.go @@ -0,0 +1,72 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPush pushes an image or repository to the registry. 
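+//
+// Editor's note (not part of the upstream file): a canonical (digest)
+// reference is rejected below; only NAME[:TAG] can be pushed, with the tag
+// recovered from the reference type switch when present.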
+// +// Usage: docker push NAME[:TAG] +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(0)) + if err != nil { + return err + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := cli.resolveAuthConfig(repoInfo.Index) + + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") + + responseBody, err := cli.imagePushPrivileged(authConfig, ref.Name(), tag, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} + +func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + return cli.client.ImagePush(context.Background(), options, requestPrivilege) +} diff --git a/vendor/github.com/docker/docker/api/client/rename.go b/vendor/github.com/docker/docker/api/client/rename.go new file mode 100644 index 00000000..68369881 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rename.go @@ -0,0 +1,34 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRename renames a container. +// +// Usage: docker rename OLD_NAME NEW_NAME +func (cli *DockerCli) CmdRename(args ...string) error { + cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true) + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + oldName := strings.TrimSpace(cmd.Arg(0)) + newName := strings.TrimSpace(cmd.Arg(1)) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } + + if err := cli.client.ContainerRename(context.Background(), oldName, newName); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/restart.go b/vendor/github.com/docker/docker/api/client/restart.go new file mode 100644 index 00000000..c0a04bd1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/restart.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRestart restarts one or more containers. +// +// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] 
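+//
+// Editor's note (not part of the upstream file): -t/--time (default 10) is
+// the grace period handed to ContainerRestart; the daemon kills the container
+// if it has not stopped within that many seconds.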
+func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerRestart(context.Background(), name, *nSeconds); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/rm.go b/vendor/github.com/docker/docker/api/client/rm.go new file mode 100644 index 00000000..c252b1f7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rm.go @@ -0,0 +1,56 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRm removes one or more containers. +// +// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdRm(args ...string) error { + cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true) + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") + link := cmd.Bool([]string{"l", "-link"}, false, "Remove the specified link") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if name == "" { + return fmt.Errorf("Container name cannot be empty") + } + name = strings.Trim(name, "/") + + if err := cli.removeContainer(name, *v, *link, *force); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} + +func (cli *DockerCli) removeContainer(containerID string, removeVolumes, removeLinks, force bool) error { + options := types.ContainerRemoveOptions{ + ContainerID: containerID, + RemoveVolumes: removeVolumes, + RemoveLinks: removeLinks, + Force: force, + } + if err := cli.client.ContainerRemove(context.Background(), options); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/rmi.go b/vendor/github.com/docker/docker/api/client/rmi.go new file mode 100644 index 00000000..ac1b41db --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rmi.go @@ -0,0 +1,59 @@ +package client + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRmi removes all images with the specified name(s). +// +// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
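+//
+// Editor's example (invocation only, not upstream code): "docker rmi
+// --no-prune IMAGE" maps to types.ImageRemoveOptions{ImageID: name,
+// Force: false, PruneChildren: false}, so untagged parent layers are kept.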
+func (cli *DockerCli) CmdRmi(args ...string) error { + cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true) + force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") + noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var errs []string + for _, name := range cmd.Args() { + options := types.ImageRemoveOptions{ + ImageID: name, + Force: *force, + PruneChildren: !*noprune, + } + + dels, err := cli.client.ImageRemove(context.Background(), options) + if err != nil { + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged) + } + } + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/run.go b/vendor/github.com/docker/docker/api/client/run.go new file mode 100644 index 00000000..be0c62cb --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/run.go @@ -0,0 +1,274 @@ +package client + +import ( + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" +) + +const ( + errCmdNotFound = "not found or does not exist." + errCmdCouldNotBeInvoked = "could not be invoked." +) + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +// if container start fails with 'command not found' error, return 127 +// if container start fails with 'command cannot be invoked' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.Trim(err.Error(), "Error response from daemon: ") + statusError := Cli.StatusError{StatusCode: 125} + + if strings.HasPrefix(trimmedErr, "Container command") { + if strings.Contains(trimmedErr, errCmdNotFound) { + statusError = Cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, errCmdCouldNotBeInvoked) { + statusError = Cli.StatusError{StatusCode: 126} + } + } + + return statusError +} + +// CmdRun runs a command in a new container. +// +// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
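+//
+// As a sketch (image and command are hypothetical), an interactive
+// container that is cleaned up on exit would be:
+//
+//   docker run --rm -it ubuntu:16.04 /bin/bash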
+func (cli *DockerCli) CmdRun(args ...string) error { + cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true) + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") + flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + flDetachKeys = cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + flAttach *opts.ListOpts + + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + // just in case the Parse does not exit + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(125) + } + + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") + } + + if config.Image == "" { + cmd.Usage() + return nil + } + + config.ArgsEscaped = false + + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := cmd.Lookup("-attach"); fl != nil { + flAttach = fl.Value.(*opts.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + if *flAutoRemove { + return ErrConflictDetachAutoRemove + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable flSigProxy when in TTY mode + sigProxy := *flSigProxy + if config.Tty { + sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. 
+ if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() + } + + createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + cmd.ReportError(err.Error(), true) + return runStartContainerErr(err) + } + if sigProxy { + sigc := cli.forwardAllSignals(createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(cli.out, "%s\n", createResponse.ID) + }() + } + if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { + return ErrConflictRestartPolicyAndAutoRemove + } + + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = cli.in + } + if config.AttachStdout { + out = cli.out + } + if config.AttachStderr { + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + if *flDetachKeys != "" { + cli.configFile.DetachKeys = *flDetachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: createResponse.ID, + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: cli.configFile.DetachKeys, + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + if in != nil && config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp) + }) + } + + if *flAutoRemove { + defer func() { + if err := cli.removeContainer(createResponse.ID, true, false, false); err != nil { + fmt.Fprintf(cli.err, "%v\n", err) + } + }() + } + + //start the container + if err := cli.client.ContainerStart(context.Background(), createResponse.ID); err != nil { + cmd.ReportError(err.Error(), false) + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(createResponse.ID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. 
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + var status int + + // Attached mode + if *flAutoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + return runStartContainerErr(err) + } + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } else { + // No Autoremove: Simply retrieve the exit code + if !config.Tty { + // In non-TTY mode, we can't detach, so we must wait for container exit + if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + return err + } + } else { + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/save.go b/vendor/github.com/docker/docker/api/client/save.go new file mode 100644 index 00000000..4aabf1bd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/save.go @@ -0,0 +1,42 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdSave saves one or more images to a tar archive. +// +// The tar archive is written to STDOUT by default, or written to a file. +// +// Usage: docker save [OPTIONS] IMAGE [IMAGE...] +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + responseBody, err := cli.client.ImageSave(context.Background(), cmd.Args()) + if err != nil { + return err + } + defer responseBody.Close() + + if *outfile == "" { + _, err := io.Copy(cli.out, responseBody) + return err + } + + return copyToFile(*outfile, responseBody) + +} diff --git a/vendor/github.com/docker/docker/api/client/search.go b/vendor/github.com/docker/docker/api/client/search.go new file mode 100644 index 00000000..82deb409 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/search.go @@ -0,0 +1,93 @@ +package client + +import ( + "fmt" + "net/url" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// CmdSearch searches the Docker Hub for images. 
+// +// Usage: docker search [OPTIONS] TERM +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + v := url.Values{} + v.Set("term", name) + + indexInfo, err := registry.ParseSearchIndexInfo(name) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(indexInfo) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageSearchOptions{ + Term: name, + RegistryAuth: encodedAuth, + } + + unorderedResults, err := cli.client.ImageSearch(context.Background(), options, requestPrivilege) + if err != nil { + return err + } + + results := searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = stringutils.Truncate(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated || res.IsTrusted { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// SearchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/docker/docker/api/client/start.go b/vendor/github.com/docker/docker/api/client/start.go new file mode 100644 index 00000000..1ff2845f --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/start.go @@ -0,0 +1,157 @@ +package client + +import ( + "fmt" + "io" + "os" + "strings" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/engine-api/types" +) + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) + continue + } + + if err := cli.client.ContainerKill(context.Background(), cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +// CmdStart starts one or more containers. 
+// +// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdStart(args ...string) error { + cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true) + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *attach || *openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + // 2. Attach to the container. + containerID := cmd.Arg(0) + c, err := cli.client.ContainerInspect(context.Background(), containerID) + if err != nil { + return err + } + + if !c.Config.Tty { + sigc := cli.forwardAllSignals(containerID) + defer signal.StopCatch(sigc) + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: containerID, + Stream: true, + Stdin: *openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: cli.configFile.DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = cli.in + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + + cErr := promise.Go(func() error { + return cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp) + }) + + // 3. Start the container. + if err := cli.client.ContainerStart(context.Background(), containerID); err != nil { + return err + } + + // 4. Wait for attachment to break. + if c.Config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(containerID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + _, status, err := getExitCode(cli, containerID) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + } else { + // We're not going to attach to anything. + // Start as many containers as we want. 
+ return cli.startContainersWithoutAttachments(cmd.Args()) + } + + return nil +} + +func (cli *DockerCli) startContainersWithoutAttachments(containerIDs []string) error { + var failedContainers []string + for _, containerID := range containerIDs { + if err := cli.client.ContainerStart(context.Background(), containerID); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + failedContainers = append(failedContainers, containerID) + } else { + fmt.Fprintf(cli.out, "%s\n", containerID) + } + } + + if len(failedContainers) > 0 { + return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/stats.go b/vendor/github.com/docker/docker/api/client/stats.go new file mode 100644 index 00000000..b84ac3e0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stats.go @@ -0,0 +1,208 @@ +package client + +import ( + "fmt" + "io" + "strings" + "sync" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// CmdStats displays a live stream of resource usage statistics for one or more containers. +// +// This shows real-time information on CPU usage, memory usage, and network I/O. +// +// Usage: docker stats [OPTIONS] [CONTAINER...] +func (cli *DockerCli) CmdStats(args ...string) error { + cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true) + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") + + cmd.ParseFlags(args, true) + + names := cmd.Args() + showAll := len(names) == 0 + closeChan := make(chan error) + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + resBody, err := cli.client.Events(context.Background(), options) + // Whether we successfully subscribed to events or not, we can now + // unblock the main goroutine. + close(started) + if err != nil { + closeChan <- err + return + } + defer resBody.Close() + + decodeEvents(resBody, func(event events.Message, err error) error { + if err != nil { + closeChan <- err + return nil + } + c <- event + return nil + }) + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). + getContainerList := func() { + options := types.ContainerListOptions{ + All: *all, + } + cs, err := cli.client.ContainerList(context.Background(), options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := &containerStats{Name: container.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. 
We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. + started := make(chan struct{}) + eh := eventHandler{handlers: make(map[string]func(events.Message))} + eh.Handle("create", func(e events.Message) { + if *all { + s := &containerStats{Name: e.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := &containerStats{Name: e.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !*all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). + for _, name := range names { + s := &containerStats{Name: name} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. + time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + c.mu.Lock() + if c.err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) + } + c.mu.Unlock() + } + cStats.mu.Unlock() + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + printHeader := func() { + if !*noStream { + fmt.Fprint(cli.out, "\033[2J") + fmt.Fprint(cli.out, "\033[H") + } + io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n") + } + + for range time.Tick(500 * time.Millisecond) { + printHeader() + toRemove := []int{} + cStats.mu.Lock() + for i, s := range cStats.cs { + if err := s.Display(w); err != nil && !*noStream { + toRemove = append(toRemove, i) + } + } + for j := len(toRemove) - 1; j >= 0; j-- { + i := toRemove[j] + cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...) 
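+ // (walking toRemove in reverse keeps the earlier recorded indices
+ // valid as entries are sliced out)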
+ } + if len(cStats.cs) == 0 && !showAll { + return nil + } + cStats.mu.Unlock() + w.Flush() + if *noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/stats_helpers.go b/vendor/github.com/docker/docker/api/client/stats_helpers.go new file mode 100644 index 00000000..404c3ff1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stats_helpers.go @@ -0,0 +1,219 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +type containerStats struct { + Name string + CPUPercentage float64 + Memory float64 + MemoryLimit float64 + MemoryPercentage float64 + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 + mu sync.RWMutex + err error +} + +type stats struct { + mu sync.Mutex + cs []*containerStats +} + +func (s *stats) add(cs *containerStats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Name); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) + } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Name == cid { + return i, true + } + } + return -1, false +} + +func (s *containerStats) Collect(cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if error happens and we get nothing of stats, release wait group whatever + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + responseBody, err := cli.ContainerStats(context.Background(), s.Name, streamStats) + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + defer responseBody.Close() + + dec := json.NewDecoder(responseBody) + go func() { + for { + var v *types.StatsJSON + if err := dec.Decode(&v); err != nil { + u <- err + return + } + + var memPercent = 0.0 + var cpuPercent = 0.0 + + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if v.MemoryStats.Limit != 0 { + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + } + + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) + blkRead, blkWrite := calculateBlockIO(v.BlkioStats) + s.mu.Lock() + s.CPUPercentage = cpuPercent + s.Memory = float64(v.MemoryStats.Usage) + s.MemoryLimit = float64(v.MemoryStats.Limit) + s.MemoryPercentage = memPercent + s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) + s.BlockRead = float64(blkRead) + s.BlockWrite = float64(blkWrite) + s.PidsCurrent = v.PidsStats.Current + s.mu.Unlock() + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the 
specified duration. + s.mu.Lock() + s.CPUPercentage = 0 + s.Memory = 0 + s.MemoryPercentage = 0 + s.MemoryLimit = 0 + s.NetworkRx = 0 + s.NetworkTx = 0 + s.BlockRead = 0 + s.BlockWrite = 0 + s.PidsCurrent = 0 + s.mu.Unlock() + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func (s *containerStats) Display(w io.Writer) error { + s.mu.RLock() + defer s.mu.RUnlock() + if s.err != nil { + return s.err + } + fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\t%d\n", + s.Name, + s.CPUPercentage, + units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), + s.MemoryPercentage, + units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), + units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite), + s.PidsCurrent) + return nil +} + +func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} + +func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} diff --git a/vendor/github.com/docker/docker/api/client/stop.go b/vendor/github.com/docker/docker/api/client/stop.go new file mode 100644 index 00000000..23d53447 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stop.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdStop stops one or more containers. +// +// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). +// +// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
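+//
+// As a sketch (the container name is hypothetical), allowing 30 seconds
+// before the SIGKILL is sent:
+//
+//   docker stop --time 30 web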
+func (cli *DockerCli) CmdStop(args ...string) error { + cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerStop(context.Background(), name, *nSeconds); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/tag.go b/vendor/github.com/docker/docker/api/client/tag.go new file mode 100644 index 00000000..1d87e437 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/tag.go @@ -0,0 +1,46 @@ +package client + +import ( + "errors" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdTag tags an image into a repository. +// +// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true) + force := cmd.Bool([]string{"#f", "#-force"}, false, "Force the tagging even if there's a conflict") + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(1)) + if err != nil { + return err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + + options := types.ImageTagOptions{ + ImageID: cmd.Arg(0), + RepositoryName: ref.Name(), + Tag: tag, + Force: *force, + } + + return cli.client.ImageTag(context.Background(), options) +} diff --git a/vendor/github.com/docker/docker/api/client/top.go b/vendor/github.com/docker/docker/api/client/top.go new file mode 100644 index 00000000..bb2ec46c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/top.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdTop displays the running processes of a container. 
+// +// Usage: docker top CONTAINER +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var arguments []string + if cmd.NArg() > 1 { + arguments = cmd.Args()[1:] + } + + procList, err := cli.client.ContainerTop(context.Background(), cmd.Arg(0), arguments) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/unpause.go b/vendor/github.com/docker/docker/api/client/unpause.go new file mode 100644 index 00000000..b8630b1f --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/unpause.go @@ -0,0 +1,34 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdUnpause unpauses all processes within a container, for one or more containers. +// +// Usage: docker unpause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerUnpause(context.Background(), name); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/update.go b/vendor/github.com/docker/docker/api/client/update.go new file mode 100644 index 00000000..a2f9e534 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/update.go @@ -0,0 +1,117 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" +) + +// CmdUpdate updates resources of one or more containers. +// +// Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
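+//
+// As a sketch (name and values are hypothetical), raising a container's
+// memory limit and CPU shares in a single call:
+//
+//   docker update --memory 512m --cpu-shares 512 web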
+func (cli *DockerCli) CmdUpdate(args ...string) error { + cmd := Cli.Subcmd("update", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["update"].Description, true) + flBlkioWeight := cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") + flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flCpusetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") + flRestartPolicy := cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + if cmd.NFlag() == 0 { + return fmt.Errorf("You must provide one or more flags when using this command.") + } + + var err error + var flMemory int64 + if *flMemoryString != "" { + flMemory, err = units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + } + + var memoryReservation int64 + if *flMemoryReservation != "" { + memoryReservation, err = units.RAMInBytes(*flMemoryReservation) + if err != nil { + return err + } + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + } + } + + var kernelMemory int64 + if *flKernelMemory != "" { + kernelMemory, err = units.RAMInBytes(*flKernelMemory) + if err != nil { + return err + } + } + + var restartPolicy container.RestartPolicy + if *flRestartPolicy != "" { + restartPolicy, err = opts.ParseRestartPolicy(*flRestartPolicy) + if err != nil { + return err + } + } + + resources := container.Resources{ + BlkioWeight: *flBlkioWeight, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CPUShares: *flCPUShares, + Memory: flMemory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + KernelMemory: kernelMemory, + CPUPeriod: *flCPUPeriod, + CPUQuota: *flCPUQuota, + } + + updateConfig := container.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + names := cmd.Args() + var errs []string + for _, name := range names { + if err := cli.client.ContainerUpdate(context.Background(), name, updateConfig); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/utils.go b/vendor/github.com/docker/docker/api/client/utils.go new file mode 100644 index 00000000..4deee224 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/utils.go @@ -0,0 +1,202 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "time" + + 
"golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +func (cli *DockerCli) electAuthServer() string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.client.Info(context.Background()); err != nil { + fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// encodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) client.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + authConfig, err := cli.configureAuth("", "", indexServer, false) + if err != nil { + return "", err + } + return encodeAuthToBase64(authConfig) + } +} + +func (cli *DockerCli) resizeTty(id string, isExec bool) { + height, width := cli.getTtySize() + cli.resizeTtyTo(id, height, width, isExec) +} + +func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) { + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + ID: id, + Height: height, + Width: width, + } + + var err error + if isExec { + err = cli.client.ContainerExecResize(context.Background(), options) + } else { + err = cli.client.ContainerResize(context.Background(), options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// getExitCode perform an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { + c, err := cli.client.ContainerInspect(context.Background(), containerID) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return c.State.Running, c.State.ExitCode, nil +} + +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { + resp, err := cli.client.ContainerExecInspect(context.Background(), execID) + if err != nil { + // If we can't connect, then the daemon probably died. 
+ if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { + cli.resizeTty(id, isExec) + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.getTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.getTtySize() + + if prevW != w || prevH != h { + cli.resizeTty(id, isExec) + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + cli.resizeTty(id, isExec) + } + }() + } + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminalOut { + return 0, 0 + } + ws, err := term.GetWinsize(cli.outFd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func copyToFile(outfile string, r io.Reader) error { + tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// resolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func (cli *DockerCli) resolveAuthConfig(index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = cli.electAuthServer() + } + + a, _ := getCredentials(cli.configFile, configKey) + return a +} + +func (cli *DockerCli) retrieveAuthConfigs() map[string]types.AuthConfig { + acs, _ := getAllCredentials(cli.configFile) + return acs +} diff --git a/vendor/github.com/docker/docker/api/client/version.go b/vendor/github.com/docker/docker/api/client/version.go new file mode 100644 index 00000000..ebec4def --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/version.go @@ -0,0 +1,95 @@ +package client + +import ( + "runtime" + "text/template" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/types" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.APIVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} + Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} + Experimental: {{.Server.Experimental}}{{end}}{{end}}` + +// CmdVersion shows Docker version information. +// +// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. 
+// +// Usage: docker version +func (cli *DockerCli) CmdVersion(args ...string) (err error) { + cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + templateFormat := versionTemplate + if *tmplStr != "" { + templateFormat = *tmplStr + } + + var tmpl *template.Template + if tmpl, err = templates.Parse(templateFormat); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + vd := types.VersionResponse{ + Client: &types.Version{ + Version: dockerversion.Version, + APIVersion: cli.client.ClientVersion(), + GoVersion: runtime.Version(), + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: utils.ExperimentalBuild(), + }, + } + + serverVersion, err := cli.client.ServerVersion(context.Background()) + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { + err = err2 + } + cli.out.Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/docker/docker/api/client/volume.go b/vendor/github.com/docker/docker/api/client/volume.go new file mode 100644 index 00000000..37e623fb --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/volume.go @@ -0,0 +1,177 @@ +package client + +import ( + "fmt" + "sort" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdVolume is the parent subcommand for all volume commands +// +// Usage: docker volume +func (cli *DockerCli) CmdVolume(args ...string) error { + description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n" + commands := [][]string{ + {"create", "Create a volume"}, + {"inspect", "Return low-level information on a volume"}, + {"ls", "List volumes"}, + {"rm", "Remove a volume"}, + } + + for _, cmd := range commands { + description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1]) + } + + description += "\nRun 'docker volume COMMAND --help' for more information on a command" + cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false) + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdVolumeLs outputs a list of Docker volumes. +// +// Usage: docker volume ls [OPTIONS] +func (cli *DockerCli) CmdVolumeLs(args ...string) error { + cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) + + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 
'dangling=true')") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) + if err != nil { + return err + } + } + + volumes, err := cli.client.VolumeList(context.Background(), volFilterArgs) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + for _, warn := range volumes.Warnings { + fmt.Fprintln(cli.err, warn) + } + fmt.Fprintf(w, "DRIVER \tVOLUME NAME") + fmt.Fprintf(w, "\n") + } + + sort.Sort(byVolumeName(volumes.Volumes)) + for _, vol := range volumes.Volumes { + if *quiet { + fmt.Fprintln(w, vol.Name) + continue + } + fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) + } + w.Flush() + return nil +} + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +// CmdVolumeInspect displays low-level information on one or more volumes. +// +// Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] +func (cli *DockerCli) CmdVolumeInspect(args ...string) error { + cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + if err := cmd.Parse(args); err != nil { + return nil + } + + inspectSearcher := func(name string) (interface{}, []byte, error) { + i, err := cli.client.VolumeInspect(context.Background(), name) + return i, nil, err + } + + return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) +} + +// CmdVolumeCreate creates a new volume. +// +// Usage: docker volume create [OPTIONS] +func (cli *DockerCli) CmdVolumeCreate(args ...string) error { + cmd := Cli.Subcmd("volume create", nil, "Create a volume", true) + flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name") + flName := cmd.String([]string{"-name"}, "", "Specify volume name") + + flDriverOpts := opts.NewMapOpts(nil, nil) + cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options") + + flLabels := opts.NewListOpts(nil) + cmd.Var(&flLabels, []string{"-label"}, "Set metadata for a volume") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volReq := types.VolumeCreateRequest{ + Driver: *flDriver, + DriverOpts: flDriverOpts.GetAll(), + Name: *flName, + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + vol, err := cli.client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", vol.Name) + return nil +} + +// CmdVolumeRm removes one or more volumes. +// +// Usage: docker volume rm VOLUME [VOLUME...] 
+func (cli *DockerCli) CmdVolumeRm(args ...string) error { + cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true) + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + var status = 0 + + for _, name := range cmd.Args() { + if err := cli.client.VolumeRemove(context.Background(), name); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(cli.out, "%s\n", name) + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/wait.go b/vendor/github.com/docker/docker/api/client/wait.go new file mode 100644 index 00000000..609cd3be --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/wait.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdWait blocks until a container stops, then prints its exit code. +// +// If more than one container is specified, this will wait synchronously on each container. +// +// Usage: docker wait CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + status, err := cli.client.ContainerWait(context.Background(), name) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 00000000..63560c6d --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,146 @@ +package api + +import ( + "fmt" + "mime" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // Version of Current REST API + DefaultVersion version.Version = "1.23" + + // MinVersion represents Minimum REST API version supported + MinVersion version.Version = "1.12" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. 
"0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first int + last int + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) + return strings.Join(result, ", ") +} + +func formGroup(key string, start, last int) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(start) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/errors.go b/vendor/github.com/docker/docker/api/server/httputils/errors.go new file mode 100644 index 00000000..cf8d2ae6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/errors.go @@ -0,0 +1,70 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. 
+type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// WriteError decodes a specific docker error and sends it in the response. +func WriteError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") + return + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "unauthorized": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + http.Error(w, errMsg, statusCode) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form.go b/vendor/github.com/docker/docker/api/server/httputils/form.go new file mode 100644 index 00000000..20188c12 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. 
+// It fails if the archive name and path are not in the request. +func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/vendor/github.com/docker/docker/api/server/httputils/httputils.go new file mode 100644 index 00000000..59ee0308 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils.go @@ -0,0 +1,107 @@ +package httputils + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/version" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// UAStringKey is the key used for the user-agent string in the net/context struct +const UAStringKey = "upstream-user-agent" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). +type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list of http streams is properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, a POST request without a Content-Type (even with an empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// ParseMultipartForm ensures the request form is parsed, even with invalid content types. 
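+// As with ParseForm above, errors prefixed with "mime:" (a missing or +// unparsable Content-Type) are ignored so that requests without a body still succeed.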
+func ParseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return json.NewEncoder(w).Encode(v) +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. +func VersionFromContext(ctx context.Context) (ver version.Version) { + if ctx == nil { + return + } + val := ctx.Value(APIVersionKey) + if val == nil { + return + } + return val.(version.Version) +} diff --git a/vendor/github.com/docker/docker/api/server/middleware.go b/vendor/github.com/docker/docker/api/server/middleware.go new file mode 100644 index 00000000..2622bf1b --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware.go @@ -0,0 +1,41 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/authorization" +) + +// handleWithGlobalMiddlwares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + handleVersion := middleware.NewVersionMiddleware(dockerversion.Version, api.DefaultVersion, api.MinVersion) + next = handleVersion(next) + + if s.cfg.EnableCors { + handleCORS := middleware.NewCORSMiddleware(s.cfg.CorsHeaders) + next = handleCORS(next) + } + + handleUserAgent := middleware.NewUserAgentMiddleware(s.cfg.Version) + next = handleUserAgent(next) + + // Only want this on debug level + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + if len(s.cfg.AuthorizationPluginNames) > 0 { + s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames) + handleAuthorization := middleware.NewAuthorizationMiddleware(s.authZPlugins) + next = handleAuthorization(next) + } + + return next +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/authorization.go b/vendor/github.com/docker/docker/api/server/middleware/authorization.go new file mode 100644 index 00000000..cbfa99e7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/authorization.go @@ -0,0 +1,42 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/authorization" + "golang.org/x/net/context" +) + +// NewAuthorizationMiddleware creates a new Authorization middleware. 
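+// It runs every request through the authorization plugins twice: AuthZRequest +// before the handler and AuthZResponse after it, failing the request on the +// first plugin error.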
+func NewAuthorizationMiddleware(plugins []authorization.Plugin) Middleware { + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // FIXME: fill when authN gets in + // User and UserAuthNMethod are taken from AuthN plugins + // Currently tracked in https://github.com/docker/docker/pull/13994 + user := "" + userAuthNMethod := "" + authCtx := authorization.NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := authorization.NewResponseModifier(w) + + if err := handler(ctx, rw, r, vars); err != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if err := authCtx.AuthZResponse(rw, r); err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/cors.go b/vendor/github.com/docker/docker/api/server/middleware/cors.go new file mode 100644 index 00000000..de21897d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/cors.go @@ -0,0 +1,33 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// NewCORSMiddleware creates a new CORS middleware. +func NewCORSMiddleware(defaultHeaders string) Middleware { + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all header values will be passed to the HTTP handler + corsHeaders := defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + writeCorsHeaders(w, r, corsHeaders) + return handler(ctx, w, r, vars) + } + } +} + +func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/debug.go b/vendor/github.com/docker/docker/api/server/middleware/debug.go new file mode 100644 index 00000000..be7056f6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/debug.go @@ -0,0 +1,56 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to the logger +func DebugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize 
:= 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + if _, exists := postForm["password"]; exists { + postForm["password"] = "*****" + } + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/middleware.go b/vendor/github.com/docker/docker/api/server/middleware/middleware.go new file mode 100644 index 00000000..588331ae --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/middleware.go @@ -0,0 +1,7 @@ +package middleware + +import "github.com/docker/docker/api/server/httputils" + +// Middleware is an adapter to allow the use of ordinary functions as Docker API filters. +// Any function that has the appropriate signature can be registered as a middleware. +type Middleware func(handler httputils.APIFunc) httputils.APIFunc diff --git a/vendor/github.com/docker/docker/api/server/middleware/user_agent.go b/vendor/github.com/docker/docker/api/server/middleware/user_agent.go new file mode 100644 index 00000000..188196bf --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/user_agent.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +// NewUserAgentMiddleware creates a new UserAgent middleware. 
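+// It records the client's User-Agent string in the request context and, for +// Docker clients, logs a debug message when client and server versions differ; +// the request itself is never rejected.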
+func NewUserAgentMiddleware(versionCheck string) Middleware { + serverVersion := version.Version(versionCheck) + + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + ctx = context.WithValue(ctx, httputils.UAStringKey, r.Header.Get("User-Agent")) + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + + // v1.20 onwards includes the GOOS of the client after the version + // such as Docker/1.7.0 (linux) + if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { + userAgent[1] = strings.Split(userAgent[1], " ")[0] + } + + if len(userAgent) == 2 && !serverVersion.Equal(version.Version(userAgent[1])) { + logrus.Debugf("Client and server don't have the same version (client: %s, server: %s)", userAgent[1], serverVersion) + } + } + return handler(ctx, w, r, vars) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version.go b/vendor/github.com/docker/docker/api/server/middleware/version.go new file mode 100644 index 00000000..41d518bc --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/version.go @@ -0,0 +1,45 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +type badRequestError struct { + error +} + +func (badRequestError) HTTPErrorStatusCode() int { + return http.StatusBadRequest +} + +// NewVersionMiddleware creates a new Version middleware. +func NewVersionMiddleware(versionCheck string, defaultVersion, minVersion version.Version) Middleware { + serverVersion := version.Version(versionCheck) + + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := version.Version(vars["version"]) + if apiVersion == "" { + apiVersion = defaultVersion + } + + if apiVersion.GreaterThan(defaultVersion) { + return badRequestError{fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", apiVersion, defaultVersion)} + } + if apiVersion.LessThan(minVersion) { + return badRequestError{fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, minVersion)} + } + + header := fmt.Sprintf("Docker/%s (%s)", serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) + return handler(ctx, w, r, vars) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/profiler.go b/vendor/github.com/docker/docker/api/server/profiler.go new file mode 100644 index 00000000..3c0dfd08 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/profiler.go @@ -0,0 +1,40 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. +func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/backend.go b/vendor/github.com/docker/docker/api/server/router/build/backend.go new file mode 100644 index 00000000..839f3160 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/backend.go @@ -0,0 +1,20 @@ +package build + +import ( + "io" + + "github.com/docker/docker/builder" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build builds a Docker image referenced by an imageID string. + // + // Note: Tagging an image should not be done by a Builder, it should instead be done + // by the caller. 
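+ // The string returned is the ID of the image that was built.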
+ // + // TODO: make this return a reference instead of string + Build(clientCtx context.Context, config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build.go b/vendor/github.com/docker/docker/api/server/router/build/build.go new file mode 100644 index 00000000..dc85d1df --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &buildRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.NewPostRoute("/build", r.postBuild), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build_routes.go b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go new file mode 100644 index 00000000..a6b787d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go @@ -0,0 +1,213 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.Tags = r.Form["t"] + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if 
!container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + var buildArgs = map[string]string{} + buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + var labels = map[string]string{} + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + if err := json.NewDecoder(strings.NewReader(labelsJSON)).Decode(&labels); err != nil { + return nil, err + } + options.Labels = labels + } + + return options, nil +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]types.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + notVerboseBuffer = bytes.NewBuffer(nil) + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. 
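+ // The wrapped reader streams download progress for the remote context back + // to the client, or into the not-verbose buffer when quiet output (q) was + // requested.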
+ createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := sf.NewProgressOutput(output, true) + if buildOptions.SuppressOutput { + progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) + } + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) + } + + var ( + context builder.ModifiableContext + dockerfileName string + out io.Writer + ) + context, dockerfileName, err = builder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader) + if err != nil { + return errf(err) + } + defer func() { + if err := context.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + if len(dockerfileName) > 0 { + buildOptions.Dockerfile = dockerfileName + } + + buildOptions.AuthConfigs = authConfigs + + out = output + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + out = &syncWriter{w: out} + stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} + stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} + + closeNotifier := make(<-chan bool) + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + imgID, err := br.backend.Build(ctx, buildOptions, + builder.DockerIgnoreContext{ModifiableContext: context}, + stdout, stderr, out, + closeNotifier) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if buildOptions.SuppressOutput { + stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + fmt.Fprintf(stdout, "%s\n", string(imgID)) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/backend.go b/vendor/github.com/docker/docker/api/server/router/container/backend.go new file mode 100644 index 00000000..bd891975 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/backend.go @@ -0,0 +1,71 @@ +package container + +import ( + "io" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. 
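+// Each method backs the container endpoint of the same name (create, start, +// stop, restart, kill, pause, unpause, rename, resize, update, wait and rm).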
+type stateBackend interface { + ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig) error + ContainerStop(name string, seconds int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) + ContainerWait(name string, timeout time.Duration) (int, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version version.Version) (interface{}, error) + ContainerLogs(name string, config *backend.ContainerLogsConfig, started chan struct{}) error + ContainerStats(name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. +type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container.go b/vendor/github.com/docker/docker/api/server/router/container/container.go new file mode 100644 index 00000000..873f13d2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container.go @@ -0,0 +1,63 @@ +package container + +import "github.com/docker/docker/api/server/router" + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend) router.Router { + r := &containerRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs), + router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", 
r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container_routes.go b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go new file mode 100644 index 00000000..016e00f0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go @@ -0,0 +1,531 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filter: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + var 
closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Stop: closeNotifier, + Version: string(httputils.VersionFromContext(ctx)), + } + + return s.backend.ContainerStats(vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + containerName := vars["name"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + }, + OutStream: w, + Stop: closeNotifier, + } + + chStarted := make(chan struct{}) + if err := s.backend.ContainerLogs(containerName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. 
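+ // At this point the 200 status line has already been sent, so the error + // can only be reported in-band as part of the log stream.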
+ fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err) + default: + return err + } + } + + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assume chunked encoding + // or, more technically, that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + var hostConfig *container.HostConfig + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := runconfig.DecodeHostConfig(r.Body) + if err != nil { + return err + } + + hostConfig = c + } + + if err := s.backend.ContainerStart(vars["name"], hostConfig); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + seconds, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // Return any error that's not caused by the container being stopped. + // Return the error if the container is not running and the API is >= 1.20, + // to keep backwards compatibility. 
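+ // For API versions older than 1.20 this means a failed kill of an + // already-stopped container is reported as success.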
+ version := httputils.VersionFromContext(ctx) + if version.GreaterThanOrEqualTo("1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + timeout, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ + StatusCode: status, + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + warnings, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ + 
Warnings: warnings, + }) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := version.LessThan("1.19") + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such container: \"\"") + } + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + + keys := []byte{} + detachKeys := r.FormValue("detachKeys") + if detachKeys != "" { + keys, err = term.ToBytes(detachKeys) + if err != nil { + logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) + } + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: 
httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: keys, + MuxStreams: true, + } + + return s.backend.ContainerAttach(containerName, attachConfig) +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var keys []byte + var err error + detachKeys := r.FormValue("detachKeys") + if detachKeys != "" { + keys, err = term.ToBytes(detachKeys) + if err != nil { + logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) + } + } + + done := make(chan struct{}) + started := make(chan struct{}) + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: keys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/copy.go b/vendor/github.com/docker/docker/api/server/router/container/copy.go new file mode 100644 index 00000000..69584b31 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/copy.go @@ -0,0 +1,112 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// // Encode the stat to JSON, base64 encode, and place in a header. 
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/exec.go b/vendor/github.com/docker/docker/api/server/router/container/exec.go new file mode 100644 index 00000000..1a3ddc4c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/exec.go @@ -0,0 +1,134 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + execConfig.Container = name + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
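+// When the exec is not detached, the connection is hijacked and the process +// streams are written raw (with a TTY) or stdcopy-multiplexed (without one).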
+func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if version.GreaterThan("1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + if err := s.backend.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error())) + logrus.Errorf("Error running exec in container: %v\n", err) + return err + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/inspect.go b/vendor/github.com/docker/docker/api/server/router/container/inspect.go new file mode 100644 index 00000000..dbbced7e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. 
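+// The shape of the returned JSON depends on the API version negotiated for the request.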
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/backend.go b/vendor/github.com/docker/docker/api/server/router/image/backend.go new file mode 100644 index 00000000..dfb02a4d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/backend.go @@ -0,0 +1,44 @@ +package image + +import ( + "io" + + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. +type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *types.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) + ImageHistory(imageName string) ([]*types.ImageHistory, error) + Images(filterArgs string, filter string, all bool) ([]*types.Image, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(newTag reference.Named, imageName string) error +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *container.Config) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, term string, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image.go b/vendor/github.com/docker/docker/api/server/router/image/image.go new file mode 100644 index 00000000..d6a1297a --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image.go @@ -0,0 +1,44 @@ +package image + +import "github.com/docker/docker/api/server/router" + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend) router.Router { + r := &imageRouter{ + backend: backend, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", 
r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/create", r.postImagesCreate), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image_routes.go b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go new file mode 100644 index 00000000..58abd4b4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go @@ -0,0 +1,383 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + pause = true + } + + c, _, _, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + newConfig, err := dockerfile.BuildFromConfig(c, r.Form["changes"]) + if err != nil { + return err + } + + commitCfg := &types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: newConfig, + MergeConfigs: true, + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + var ref reference.Named + ref, err = reference.ParseNamed(image) + if err == nil { + if tag != "" { + // The "tag" could actually be a digest. 
+ var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(ref, dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + } + if err == nil { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, ref, metaHeaders, authConfig, output) + } + } + } else { //import + var newRef reference.Named + if repo != "" { + var err error + newRef, err = reference.ParseNamed(repo) + if err != nil { + return err + } + + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + src := r.Form.Get("fromSrc") + + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + var newConfig *container.Config + newConfig, err = dockerfile.BuildFromConfig(&container.Config{}, r.Form["changes"]) + if err != nil { + return err + } + + err = s.backend.ImportImage(src, newRef, message, r.Body, output, newConfig) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + ref, err := reference.ParseNamed(vars["name"]) + if err != nil { + return err + } + tag := r.Form.Get("tag") + if tag != "" { + // Push by digest is not supported, so only tags are supported. 
+ ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + if !quiet { + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil + } + return s.backend.LoadImage(r.Body, w, quiet) +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // FIXME: The filter parameter could just be a match filter + images, err := s.backend.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + repo := r.Form.Get("repo") + tag := 
r.Form.Get("tag") + newTag, err := reference.WithName(repo) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } + if err := s.backend.TagImage(newTag, vars["name"]); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("term"), config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local.go b/vendor/github.com/docker/docker/api/server/router/local.go new file mode 100644 index 00000000..99db4242 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local.go @@ -0,0 +1,61 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. 
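The constructors above pair with the Router and Route interfaces defined in router.go below: a router only has to hand back a static slice of routes. A minimal sketch of a custom router assembled from them (the package name and endpoint are hypothetical, not part of this diff):

package custom

import (
	"net/http"

	"github.com/docker/docker/api/server/router"
	"golang.org/x/net/context"
)

// pingRouter serves a single health-check endpoint.
type pingRouter struct {
	routes []router.Route
}

// NewRouter wires up the route table once, at construction time.
func NewRouter() router.Router {
	p := &pingRouter{}
	p.routes = []router.Route{
		router.NewGetRoute("/custom/ping", p.ping),
	}
	return p
}

// Routes satisfies router.Router.
func (p *pingRouter) Routes() []router.Route {
	return p.routes
}

// ping matches the httputils.APIFunc signature used by every route handler.
func (p *pingRouter) ping(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	_, err := w.Write([]byte("pong"))
	return err
}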
+func NewHeadRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("HEAD", path, handler) +} diff --git a/vendor/github.com/docker/docker/api/server/router/router.go b/vendor/github.com/docker/docker/api/server/router/router.go new file mode 100644 index 00000000..2de25c27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to. + Path() string +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/backend.go b/vendor/github.com/docker/docker/api/server/router/system/backend.go new file mode 100644 index 00000000..e6284cd4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/backend.go @@ -0,0 +1,18 @@ +package system + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// Backend is the methods that need to be implemented to provide +// system specific functionality. +type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SubscribeToEvents(since, sinceNano int64, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system.go b/vendor/github.com/docker/docker/api/server/router/system/system.go new file mode 100644 index 00000000..76da5c52 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system.go @@ -0,0 +1,33 @@ +package system + +import "github.com/docker/docker/api/server/router" + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. 
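The events route registered below streams newline-delimited JSON and flushes after every message. A minimal consumer sketch, assuming the daemon address shown is reachable (it is illustrative only):

package main

import (
	"encoding/json"
	"log"
	"net/http"

	"github.com/docker/engine-api/types/events"
)

// watchEvents decodes events.Message values off the open HTTP response as
// the server flushes them; it only returns when the stream breaks.
func watchEvents() error {
	resp, err := http.Get("http://127.0.0.1:2375/events")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	for {
		var ev events.Message
		if err := dec.Decode(&ev); err != nil {
			return err
		}
		log.Printf("%s %s %s", ev.Type, ev.Action, ev.Actor.ID)
	}
}

func main() {
	if err := watchEvents(); err != nil {
		log.Fatal(err)
	}
}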
+type systemRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new system router +func NewRouter(b Backend) router.Router { + r := &systemRouter{ + backend: b, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.NewGetRoute("/events", r.getEvents), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system_routes.go b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go new file mode 100644 index 00000000..defaa0d6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go @@ -0,0 +1,125 @@ +package system + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + timetypes "github.com/docker/engine-api/types/time" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion.String() + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + since, sinceNano, err := timetypes.ParseTimestamps(r.Form.Get("since"), -1) + if err != nil { + return err + } + until, untilNano, err := timetypes.ParseTimestamps(r.Form.Get("until"), -1) + if err != nil { + return err + } + + var timeout <-chan time.Time + if until > 0 || untilNano > 0 { + dur := time.Unix(until, untilNano).Sub(time.Now()) + timeout = time.NewTimer(dur).C + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, sinceNano, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + var closeNotify <-chan bool + if closeNotifier, ok := w.(http.CloseNotifier); ok { + closeNotify = closeNotifier.CloseNotify() + } + + for { + select { + case ev := <-l: + jev, ok := 
ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return nil + case <-closeNotify: + logrus.Debug("Client disconnected, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ + Status: status, + IdentityToken: token, + }) +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/vendor/github.com/docker/docker/api/server/router/volume/backend.go new file mode 100644 index 00000000..fbf5ed27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/backend.go @@ -0,0 +1,15 @@ +package volume + +import ( + // TODO return types need to be refactored into pkg + "github.com/docker/engine-api/types" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string) error +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/vendor/github.com/docker/docker/api/server/router/volume/volume.go new file mode 100644 index 00000000..2683dcec --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume.go @@ -0,0 +1,35 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go new file mode 100644 index 00000000..5aa0d4a7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go @@ -0,0 +1,66 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return 
httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req types.VolumeCreateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := v.backend.VolumeRm(vars["name"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router_swapper.go b/vendor/github.com/docker/docker/api/server/router_swapper.go new file mode 100644 index 00000000..1ecc7a7f --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router_swapper.go @@ -0,0 +1,30 @@ +package server + +import ( + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +// routerSwapper is an http.Handler that allows you to swap +// mux routers. +type routerSwapper struct { + mu sync.Mutex + router *mux.Router +} + +// Swap changes the old router with the new one. +func (rs *routerSwapper) Swap(newRouter *mux.Router) { + rs.mu.Lock() + rs.router = newRouter + rs.mu.Unlock() +} + +// ServeHTTP makes the routerSwapper to implement the http.Handler interface. +func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rs.mu.Lock() + router := rs.router + rs.mu.Unlock() + router.ServeHTTP(w, r) +} diff --git a/vendor/github.com/docker/docker/api/server/server.go b/vendor/github.com/docker/docker/api/server/server.go new file mode 100644 index 00000000..1379b737 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/server.go @@ -0,0 +1,195 @@ +package server + +import ( + "crypto/tls" + "net" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/pkg/authorization" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// versionMatcher defines a variable matcher to be parsed by the router +// when a request is about to be served. +const versionMatcher = "/v{version:[0-9.]+}" + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + AuthorizationPluginNames []string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +// Server contains instance details for the server +type Server struct { + cfg *Config + servers []*HTTPServer + routers []router.Router + authZPlugins []authorization.Plugin + routerSwapper *routerSwapper +} + +// New returns a new instance of the server based on the specified configuration. 
+// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). +func New(cfg *Config) *Server { + return &Server{ + cfg: cfg, + } +} + +// Accept sets a listener the server accepts connections into. +func (s *Server) Accept(addr string, listeners ...net.Listener) { + for _, listener := range listeners { + httpServer := &HTTPServer{ + srv: &http.Server{ + Addr: addr, + }, + l: listener, + } + s.servers = append(s.servers, httpServer) + } +} + +// Close closes servers and thus stop receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +// serveAPI loops through all initialized servers and spawns goroutine +// with Server method for each. It sets createMux() as Handler also. +func (s *Server) serveAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for i := 0; i < len(s.servers); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create a http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.Background() + handlerFunc := s.handleWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + httputils.WriteError(w, err) + } + } +} + +// InitRouter initializes the list of routers for the server. +// This method also enables the Go profiler if enableProfiler is true. +func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { + for _, r := range routers { + s.routers = append(s.routers, r) + } + + m := s.createMux() + if enableProfiler { + profilerSetup(m) + } + s.routerSwapper = &routerSwapper{ + router: m, + } +} + +// createMux initializes the main router the server uses. 
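createMux below registers every route twice, once under the versionMatcher prefix and once bare, so both /v1.23/containers/json and /containers/json reach the same handler (the version middleware, which is not part of this file, supplies a default when the prefix is absent). A standalone sketch of that double-registration pattern with gorilla/mux, using an illustrative path:

package serverdemo

import (
	"net/http"

	"github.com/gorilla/mux"
)

// buildMux mounts one handler under both the versioned and the bare path,
// mirroring the registration loop in createMux.
func buildMux(handler http.Handler) *mux.Router {
	const versionMatcher = "/v{version:[0-9.]+}"
	m := mux.NewRouter()
	// Versioned form, e.g. GET /v1.23/containers/json.
	m.Path(versionMatcher + "/containers/json").Methods("GET").Handler(handler)
	// Bare form; the daemon falls back to its default API version.
	m.Path("/containers/json").Methods("GET").Handler(handler)
	return m
}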
+func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debugf("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. +func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go new file mode 100644 index 00000000..ffe9b709 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -0,0 +1,69 @@ +// Package backend includes types to send information to server backends. +// TODO(calavera): This package is pending extraction to engine-api +// when the server package is clean of daemon dependencies. +package backend + +import ( + "io" + + "github.com/docker/engine-api/types" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys []byte + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// ContainerLogsConfig holds configs for logging operations. Exists +// for users of the backend to pass it a logging configuration. +type ContainerLogsConfig struct { + types.ContainerLogsOptions + OutStream io.Writer + Stop <-chan bool +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Stop <-chan bool + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte +} + +// ExecProcessConfig holds information about the exec process +// running on the host.
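ContainerLogsConfig above bundles the public engine-api log options with the server-side output stream. A sketch of how a hypothetical backend consumer might assemble one (field values are illustrative; the option names come from the vendored engine-api types package):

package logsdemo

import (
	"os"

	"github.com/docker/docker/api/types/backend"
	"github.com/docker/engine-api/types"
)

// followLogsConfig requests both output streams and keeps the connection
// open, writing everything to the process's own stdout.
func followLogsConfig() *backend.ContainerLogsConfig {
	return &backend.ContainerLogsConfig{
		ContainerLogsOptions: types.ContainerLogsOptions{
			ShowStdout: true,
			ShowStderr: true,
			Follow:     true,
		},
		OutStream: os.Stdout,
		Stop:      make(chan bool),
	}
}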
+type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go new file mode 100644 index 00000000..6f8fdb8d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/builder.go @@ -0,0 +1,153 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder + +import ( + "io" + "os" + "time" + + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +// Context represents a file system tree. +type Context interface { + // Close allows to signal that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Stat returns an entry corresponding to path if any. + // It is recommended to return an error if path was not found. + // If path is a symlink it also returns the path to the target file. + Stat(path string) (string, FileInfo, error) + // Open opens path from the context and returns a readable stream of it. + Open(path string) (io.ReadCloser, error) + // Walk walks the tree of the context with the function passed to it. + Walk(root string, walkFn WalkFunc) error +} + +// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). +type WalkFunc func(path string, fi FileInfo, err error) error + +// ModifiableContext represents a modifiable Context. +// TODO: remove this interface once we can get rid of Remove() +type ModifiableContext interface { + Context + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. +// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. +type FileInfo interface { + os.FileInfo + Path() string +} + +// PathFileInfo is a convenience struct that implements the FileInfo interface. +type PathFileInfo struct { + os.FileInfo + // FilePath holds the absolute path to the file. + FilePath string + // Name holds the basename for the file. + FileName string +} + +// Path returns the absolute path to the file. +func (fi PathFileInfo) Path() string { + return fi.FilePath +} + +// Name returns the basename of the file. +func (fi PathFileInfo) Name() string { + if fi.FileName != "" { + return fi.FileName + } + return fi.FileInfo.Name() +} + +// Hashed defines an extra method intended for implementations of os.FileInfo. +type Hashed interface { + // Hash returns the hash of a file. + Hash() string + SetHash(string) +} + +// HashedFileInfo is a convenient struct that augments FileInfo with a field. +type HashedFileInfo struct { + FileInfo + // FileHash represents the hash of a file. + FileHash string +} + +// Hash returns the hash of a file. 
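PathFileInfo above is the adapter a Context implementation returns from Stat so callers can recover the absolute path later. A minimal sketch for a directory-backed context (the helper function is illustrative, not part of this diff):

package contextdemo

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/builder"
)

// statInContext wraps an os.Lstat result in the builder's FileInfo type,
// preserving the absolute path for later retrieval via Path().
func statInContext(root, rel string) (builder.FileInfo, error) {
	abs := filepath.Join(root, rel)
	fi, err := os.Lstat(abs)
	if err != nil {
		return nil, err
	}
	return builder.PathFileInfo{FileInfo: fi, FilePath: abs}, nil
}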
+func (fi HashedFileInfo) Hash() string { + return fi.FileHash +} + +// SetHash sets the hash of a file. +func (fi *HashedFileInfo) SetHash(h string) { + fi.FileHash = h +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + // TODO: use digest reference instead of name + + // GetImage looks up a Docker image referenced by `name`. + GetImageOnBuild(name string) (Image, error) + // Tag an image with newTag + TagImage(newTag reference.Named, imageName string) error + // Pull tells Docker to pull image referenced by `name`. + PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) + // ContainerAttach attaches to container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // Commit creates a new Docker image from an existing Docker container. + Commit(string, *types.ContainerCommitConfig) (string, error) + // Kill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // Start starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig) error + // ContainerWait stops processing until the given container is stopped. + ContainerWait(containerID string, timeout time.Duration) (int, error) + // ContainerUpdateCmd updates container.Path and container.Args + ContainerUpdateCmdOnBuild(containerID string, cmd []string) error + + // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container + // specified by a container object. + // TODO: make an Extract method instead of passing `decompress` + // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used + // with Context.Walk + //ContainerCopy(name string, res string) (io.ReadCloser, error) + // TODO: use copyBackend api + CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config +} + +// ImageCache abstracts an image cache store. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. 
+ GetCachedImageOnBuild(parentID string, cfg *container.Config) (imageID string, err error) +} diff --git a/vendor/github.com/docker/docker/builder/context.go b/vendor/github.com/docker/docker/builder/context.go new file mode 100644 index 00000000..7786c56e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context.go @@ -0,0 +1,260 @@ +package builder + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "github.com/docker/containerd/subreaper/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/gitutils" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read; +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string, excludes []string) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hang on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +// GetContextFromReader will read the contents of the given reader as either a +// Dockerfile or tar archive. Returns a tar archive used as a context and a +// path to the Dockerfile inside the tar. +func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { + buf := bufio.NewReader(r) + + magic, err := buf.Peek(archive.HeaderSize) + if err != nil && err != io.EOF { + return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) + } + + if archive.IsArchive(magic) { + return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil + } + + // Input should be read as a Dockerfile.
+ tmpDir, err := ioutil.TempDir("", "docker-build-context-") + if err != nil { + return nil, "", fmt.Errorf("unable to create temporary context directory: %v", err) + } + + f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) + if err != nil { + return nil, "", err + } + _, err = io.Copy(f, buf) + if err != nil { + f.Close() + return nil, "", err + } + + if err := f.Close(); err != nil { + return nil, "", err + } + if err := r.Close(); err != nil { + return nil, "", err + } + + tar, err := archive.Tar(tmpDir, archive.Uncompressed) + if err != nil { + return nil, "", err + } + + return ioutils.NewReadCloserWrapper(tar, func() error { + err := tar.Close() + os.RemoveAll(tmpDir) + return err + }), DefaultDockerfileName, nil + +} + +// GetContextFromGitURL uses a Git URL as context for a `docker build`. The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a nil error on +// success. +func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", fmt.Errorf("unable to find 'git': %v", err) + } + if absContextDir, err = gitutils.Clone(gitURL); err != nil { + return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) + } + + return getDockerfileRelPath(absContextDir, dockerfileName) +} + +// GetContextFromURL uses a remote URL as context for a `docker build`. The +// remote resource is downloaded as either a Dockerfile or a tar archive. +// Returns the tar archive used for the context and a path of the +// dockerfile inside the tar. +func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { + response, err := httputils.Download(remoteURL) + if err != nil { + return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) + + // Pass the response body through a progress reader. + progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) + + return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) +} + +// GetContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a nil +// error on success. +func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { + // When using a local context directory, if the Dockerfile is specified + // with the `-f/--file` option then it is considered relative to the + // current directory and not the context directory.
+ if dockerfileName != "" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + return getDockerfileRelPath(localDir, dockerfileName) +} + +// getDockerfileRelPath uses the given context directory for a `docker build` +// and returns the absolute path to the context directory, the relative path of +// the dockerfile in that context directory, and a nil error on success. +func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { + if absContextDir, err = filepath.Abs(givenContextDir); err != nil { + return "", "", fmt.Errorf("unable to get absolute context directory: %v", err) + } + + // The context dir might be a symbolic link, so follow it to the actual + // target directory. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to work around + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) + } + } + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) + } + + if !stat.IsDir() { + return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) + } + + absDockerfile := givenDockerfile + if absDockerfile == "" { + // No -f/--file was specified so use the default relative to the + // context directory. + absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { + altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) + if _, err = os.Lstat(altPath); err == nil { + absDockerfile = altPath + } + } + } + + // If not already an absolute path, the Dockerfile path should be joined to + // the base directory. + if !filepath.IsAbs(absDockerfile) { + absDockerfile = filepath.Join(absContextDir, absDockerfile) + } + + // Evaluate symlinks in the path to the Dockerfile too. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to work around + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed.
+ if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + } + } + + if _, err := os.Lstat(absDockerfile); err != nil { + if os.IsNotExist(err) { + return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) + } + return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) + } + + if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { + return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) + } + + if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) + } + + return absContextDir, relDockerfile, nil +} + +// isUNC returns true if the path is UNC (one starting \\). It always returns +// false on Linux. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} diff --git a/vendor/github.com/docker/docker/utils/utils_unix.go b/vendor/github.com/docker/docker/builder/context_unix.go similarity index 90% rename from vendor/github.com/docker/docker/utils/utils_unix.go rename to vendor/github.com/docker/docker/builder/context_unix.go index 86bfb770..d1f72e05 100644 --- a/vendor/github.com/docker/docker/utils/utils_unix.go +++ b/vendor/github.com/docker/docker/builder/context_unix.go @@ -1,6 +1,6 @@ // +build !windows -package utils +package builder import ( "path/filepath" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/bflag.go b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go new file mode 100644 index 00000000..c2e6c7da --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go @@ -0,0 +1,176 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags return the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). 
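addFlag, documented above and defined just below, is the common path for AddBool and AddString: errors are deferred to Parse so call sites stay unconditional. A minimal end-to-end sketch of the resulting usage pattern (flag names and args are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/builder/dockerfile"
)

func main() {
	bf := dockerfile.NewBFlags()
	force := bf.AddBool("force", false)
	from := bf.AddString("from", "")

	// Args holds the raw --flag tokens split off a directive line.
	bf.Args = []string{"--force", "--from=builder"}
	if err := bf.Parse(); err != nil {
		panic(err)
	}
	fmt.Println(force.IsTrue(), from.Value) // true builder
}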
+func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic(fmt.Errorf("No idea what kind of flag we have! 
Should never get here!")) + } + + } + + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go new file mode 100644 index 00000000..daa70498 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -0,0 +1,326 @@ +package dockerfile + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// BuiltinAllowedBuildArgs is list of built-in allowed build args +var BuiltinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. +type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Output io.Writer + + docker builder.Backend + context builder.Context + clientCtx context.Context + + dockerfile *parser.Node + runConfig *container.Config // runconfig for cmd, run, entrypoint etc. + flags *BFlags + tmpContainers map[string]struct{} + image string // imageID + noBaseImage bool + maintainer string + cmdSet bool + disableCommit bool + cacheBusted bool + cancelled chan struct{} + cancelOnce sync.Once + allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. + + // TODO: remove once docker.Commit can receive a tag + id string +} + +// BuildManager implements builder.Backend and is shared across all Builder objects. +type BuildManager struct { + backend builder.Backend +} + +// NewBuildManager creates a BuildManager. +func NewBuildManager(b builder.Backend) (bm *BuildManager) { + return &BuildManager{backend: b} +} + +// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. +// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, +// will be read from the Context passed to Build(). +func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, context builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { + if config == nil { + config = new(types.ImageBuildOptions) + } + if config.BuildArgs == nil { + config.BuildArgs = make(map[string]string) + } + b = &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: os.Stdout, + Stderr: os.Stderr, + docker: backend, + context: context, + runConfig: new(container.Config), + tmpContainers: map[string]struct{}{}, + cancelled: make(chan struct{}), + id: stringid.GenerateNonCryptoID(), + allowedBuildArgs: make(map[string]bool), + } + if dockerfile != nil { + b.dockerfile, err = parser.Parse(dockerfile) + if err != nil { + return nil, err + } + } + + return b, nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. 
+// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNamed(repo) + if err != nil { + return nil, err + } + + ref = reference.WithDefaultTag(ref) + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + if _, isTagged := ref.(reference.NamedTagged); !isTagged { + ref, err = reference.WithTag(ref, reference.DefaultTag) + if err != nil { + return nil, err + } + } + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} + +// Build creates a NewBuilder, which builds the image. +func (bm *BuildManager) Build(clientCtx context.Context, config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) { + b, err := NewBuilder(clientCtx, config, bm.backend, context, nil) + if err != nil { + return "", err + } + img, err := b.build(config, context, stdout, stderr, out, clientGone) + return img, err + +} + +// build runs the Dockerfile builder from a context and a docker object that allows to make calls +// to Docker. +// +// This will (barring errors): +// +// * read the dockerfile from context +// * parse the dockerfile if not already parsed +// * walk the AST and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Tag image, if applicable. +// * Print a happy message and return the image ID. +// +func (b *Builder) build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) { + b.options = config + b.context = context + b.Stdout = stdout + b.Stderr = stderr + b.Output = out + + // If Dockerfile was not parsed yet, extract it from the Context + if b.dockerfile == nil { + if err := b.readDockerfile(); err != nil { + return "", err + } + } + + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-finished: + case <-clientGone: + b.cancelOnce.Do(func() { + close(b.cancelled) + }) + } + + }() + + repoAndTags, err := sanitizeRepoAndTags(config.Tags) + if err != nil { + return "", err + } + + if len(b.options.Labels) > 0 { + line := "LABEL " + for k, v := range b.options.Labels { + line += fmt.Sprintf("%q=%q ", k, v) + } + _, node, err := parser.ParseLine(line) + if err != nil { + return "", err + } + b.dockerfile.Children = append(b.dockerfile.Children, node) + } + + var shortImgID string + for i, n := range b.dockerfile.Children { + select { + case <-b.cancelled: + logrus.Debug("Builder: build cancelled!") + fmt.Fprintf(b.Stdout, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... 
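+			// (b.cancelled is closed at most once, via cancelOnce, either by
+			// the clientGone watcher above or by Cancel().)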
+ } + if err := b.dispatch(i, n); err != nil { + if b.options.ForceRemove { + b.clearTmp() + } + return "", err + } + + shortImgID = stringid.TruncateID(b.image) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) + if b.options.Remove { + b.clearTmp() + } + } + + // check if there are any leftover build-args that were passed but not + // consumed during build. Return an error, if there are any. + leftoverArgs := []string{} + for arg := range b.options.BuildArgs { + if !b.isBuildArgAllowed(arg) { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs) + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") + } + + for _, rt := range repoAndTags { + if err := b.docker.TagImage(rt, b.image); err != nil { + return "", err + } + } + + fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) + return b.image, nil +} + +// Cancel cancels an ongoing Dockerfile build. +func (b *Builder) Cancel() { + b.cancelOnce.Do(func() { + close(b.cancelled) + }) +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range ast.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b, err := NewBuilder(context.Background(), nil, nil, nil, nil) + if err != nil { + return nil, err + } + b.runConfig = config + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + for i, n := range ast.Children { + if err := b.dispatch(i, n); err != nil { + return nil, err + } + } + + return b.runConfig, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go new file mode 100644 index 00000000..9e1b799d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go @@ -0,0 +1,42 @@ +// Package command contains the set of Dockerfile commands. 
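+// The lower-cased names defined below are the keys the parser emits and the
+// evaluator's dispatch table consumes.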
+package command + +// Define constants for the command strings +const ( + Env = "env" + Label = "label" + Maintainer = "maintainer" + Add = "add" + Copy = "copy" + From = "from" + Onbuild = "onbuild" + Workdir = "workdir" + Run = "run" + Cmd = "cmd" + Entrypoint = "entrypoint" + Expose = "expose" + Volume = "volume" + User = "user" + StopSignal = "stopsignal" + Arg = "arg" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Env: {}, + Label: {}, + Maintainer: {}, + Add: {}, + Copy: {}, + From: {}, + Onbuild: {}, + Workdir: {}, + Run: {}, + Cmd: {}, + Entrypoint: {}, + Expose: {}, + Volume: {}, + User: {}, + StopSignal: {}, + Arg: {}, +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go new file mode 100644 index 00000000..ac7c2b07 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -0,0 +1,639 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // TODO/FIXME/NOT USED + // Just here to show how to use the builder flags stuff within the + // context of a builder command. Will remove once we actually add + // a builder command to something! + /* + flBool1 := b.flags.AddBool("bool1", false) + flStr1 := b.flags.AddString("str1", "HI") + + if err := b.flags.Parse(); err != nil { + return err + } + + fmt.Printf("Bool1:%v\n", flBool1) + fmt.Printf("Str1:%v\n", flStr1) + */ + + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + b.runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + b.runConfig.Env = append(b.runConfig.Env, newVar) + } + j++ + } + + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. 
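+// The value is kept on the builder and recorded as the image author when the
+// step is committed (see commit() in internals.go).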
+func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.maintainer = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + + if b.runConfig.Labels == nil { + b.runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + b.runConfig.Labels[args[j]] = args[j+1] + j++ + } + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastOneArgument("ADD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastOneArgument("COPY") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// +func from(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("FROM") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + name := args[0] + + var ( + image builder.Image + err error + ) + + // Windows cannot support a container with no base image. + if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + return fmt.Errorf("Windows does not support FROM scratch") + } + b.image = "" + b.noBaseImage = true + } else { + // TODO: don't use `name`, instead resolve it to a digest + if !b.options.PullParent { + image, err = b.docker.GetImageOnBuild(name) + // TODO: shouldn't we error out if error is different from "not found" ? + } + if image == nil { + image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) + if err != nil { + return err + } + } + } + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. 
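+// For example, `ONBUILD ADD . /app/src` records the ADD instruction as a
+// trigger that runs when a downstream image uses this image in its FROM.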
+// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. + workdir := filepath.FromSlash(args[0]) + + if !system.IsAbs(workdir) { + current := filepath.FromSlash(b.runConfig.WorkingDir) + workdir = filepath.Join(string(os.PathSeparator), current, workdir) + } + + b.runConfig.WorkingDir = workdir + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is +// only one argument. The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + args = handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + args = append([]string{"/bin/sh", "-c"}, args...) + } else { + args = append([]string{"cmd", "/S", "/C"}, args...) + } + } + + config := &container.Config{ + Cmd: strslice.StrSlice(args), + Image: b.image, + } + + // stash the cmd + cmd := b.runConfig.Cmd + if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { + b.runConfig.Cmd = config.Cmd + } + + // stash the config environment + env := b.runConfig.Env + + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + defer func(env []string) { b.runConfig.Env = env }(env) + + // derive the net build-time environment for this run. We let config + // environment override the build time environment. + // This means that we take the b.buildArgs list of env vars and remove + // any of those variables that are defined as part of the container. In other + // words, anything in b.Config.Env. What's left is the list of build-time env + // vars that we need to add to each RUN command - note the list could be empty. + // + // We don't persist the build time environment with container's config + // environment, but just sort and prepend it to the command string at time + // of commit. 
+ // This helps with tracing back the image's actual environment at the time + // of RUN, without leaking it to the final image. It also aids cache + // lookup for same image built with same build time environment. + cmdBuildEnv := []string{} + configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + if _, ok := configEnv[key]; !ok { + cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val)) + } + } + + // derive the command to use for probeCache() and to commit in this container. + // Note that we only do this if there are any build-time env vars. Also, we + // use the special argument "|#" at the start of the args array. This will + // avoid conflicts with any RUN command since commands can not + // start with | (vertical bar). The "#" (number of build envs) is there to + // help ensure proper cache matches. We don't want a RUN command + // that starts with "foo=abc" to be considered part of a build-time env var. + saveCmd := config.Cmd + if len(cmdBuildEnv) > 0 { + sort.Strings(cmdBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) + saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) + } + + b.runConfig.Cmd = saveCmd + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + // set Cmd manually, this is special case only for Dockerfiles + b.runConfig.Cmd = config.Cmd + // set build-time environment for 'run'. + b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) + + cID, err := b.create() + if err != nil { + return err + } + + if err := b.run(cID); err != nil { + return err + } + + // revert to original config environment and set the command string to + // have the build-time env vars in it (if any) so that future cache look-ups + // properly match it. + b.runConfig.Env = env + b.runConfig.Cmd = saveCmd + return b.commit(cID, cmd, "run") +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + cmdSlice := handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) + } else { + cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...) + } + } + + b.runConfig.Cmd = strslice.StrSlice(cmdSlice) + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to +// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx. 
+// +// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. +// +func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + parsed := handleJSONArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + b.runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + if runtime.GOOS != "windows" { + b.runConfig.Entrypoint = strslice.StrSlice{"/bin/sh", "-c", parsed[0]} + } else { + b.runConfig.Entrypoint = strslice.StrSlice{"cmd", "/S", "/C", parsed[0]} + } + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.runConfig.Cmd = nil + } + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.runConfig.ExposedPorts for runconfig. +// +func expose(b *Builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if len(args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.ExposedPorts == nil { + b.runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := b.runConfig.ExposedPorts[port]; !exists { + b.runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.runConfig.User = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.Volumes == nil { + b.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range args { + v = strings.TrimSpace(v) + if v == "" { + return fmt.Errorf("Volume specified can not be an empty string") + } + b.runConfig.Volumes[v] = struct{}{} + } + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { + return err + } + return nil +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. 
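+// The value is validated with signal.ParseSignal before being stored in
+// runConfig.StopSignal.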
+func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("STOPSIGNAL requires exactly one argument")
+	}
+
+	sig := args[0]
+	_, err := signal.ParseSignal(sig)
+	if err != nil {
+		return err
+	}
+
+	b.runConfig.StopSignal = sig
+	return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
+}
+
+// ARG name[=value]
+//
+// Adds the variable foo to the trusted list of variables that can be passed
+// to the builder using the --build-arg flag for expansion/substitution or passing to 'run'.
+// The Dockerfile author may optionally set a default value for this variable.
+func arg(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("ARG requires exactly one argument definition")
+	}
+
+	var (
+		name       string
+		value      string
+		hasDefault bool
+	)
+
+	arg := args[0]
+	// 'arg' can just be a name or name-value pair. Note that this is different
+	// from 'env' that handles the split of name and value at the parser level.
+	// The reason for doing it differently for 'arg' is that we support just
+	// defining an arg and not assign it a value (while 'env' always expects a
+	// name-value pair). If possible, it will be good to harmonize the two.
+	if strings.Contains(arg, "=") {
+		parts := strings.SplitN(arg, "=", 2)
+		name = parts[0]
+		value = parts[1]
+		hasDefault = true
+	} else {
+		name = arg
+		hasDefault = false
+	}
+	// add the arg to the allowed list of build-time args from this step on.
+	b.allowedBuildArgs[name] = true
+
+	// If there is a default value associated with this arg then add it to the
+	// b.buildArgs if one is not already passed to the builder. The args passed
+	// to the builder override the default value of 'arg'.
+	if _, ok := b.options.BuildArgs[name]; !ok && hasDefault {
+		b.options.BuildArgs[name] = value
+	}
+
+	return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg))
+}
+
+func errAtLeastOneArgument(command string) error {
+	return fmt.Errorf("%s requires at least one argument", command)
+}
+
+func errExactlyOneArgument(command string) error {
+	return fmt.Errorf("%s requires exactly one argument", command)
+}
+
+func errTooManyArguments(command string) error {
+	return fmt.Errorf("Bad input to %s, too many arguments", command)
+}
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/envVarTest b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest
new file mode 100644
index 00000000..1a7fe975
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest
@@ -0,0 +1,112 @@
+hello | hello
+he'll'o | hello
+he'llo | hello
+he\'llo | he'llo
+he\\'llo | he\llo
+abc\tdef | abctdef
+"abc\tdef" | abc\tdef
+'abc\tdef' | abc\tdef
+hello\ | hello
+hello\\ | hello\
+"hello | hello
+"hello\" | hello"
+"hel'lo" | hel'lo
+'hello | hello
+'hello\' | hello\
+"''" | ''
+$. | $.
+$1 |
+he$1x | hex
+he$.x | he$.x
+he$pwd. | he.
+he$PWD | he/home +he\$PWD | he$PWD +he\\$PWD | he\/home +he\${} | he${} +he\${}xx | he${}xx +he${} | he +he${}xx | hexx +he${hi} | he +he${hi}xx | hexx +he${PWD} | he/home +he${.} | error +he${XXX:-000}xx | he000xx +he${PWD:-000}xx | he/homexx +he${XXX:-$PWD}xx | he/homexx +he${XXX:-${PWD:-yyy}}xx | he/homexx +he${XXX:-${YYY:-yyy}}xx | heyyyxx +he${XXX:YYY} | error +he${XXX:+${PWD}}xx | hexx +he${PWD:+${XXX}}xx | hexx +he${PWD:+${SHELL}}xx | hebashxx +he${XXX:+000}xx | hexx +he${PWD:+000}xx | he000xx +'he${XX}' | he${XX} +"he${PWD}" | he/home +"he'$PWD'" | he'/home' +"$PWD" | /home +'$PWD' | $PWD +'\$PWD' | \$PWD +'"hello"' | "hello" +he\$PWD | he$PWD +"he\$PWD" | he$PWD +'he\$PWD' | he\$PWD +he${PWD | error +he${PWD:=000}xx | error +he${PWD:+${PWD}:}xx | he/home:xx +he${XXX:-\$PWD:}xx | he$PWD:xx +he${XXX:-\${PWD}z}xx | he${PWDz}xx +안녕하세요 | 안녕하세요 +안'녕'하세요 | 안녕하세요 +안'녕하세요 | 안녕하세요 +안녕\'하세요 | 안녕'하세요 +안\\'녕하세요 | 안\녕하세요 +안녕\t하세요 | 안녕t하세요 +"안녕\t하세요" | 안녕\t하세요 +'안녕\t하세요 | 안녕\t하세요 +안녕하세요\ | 안녕하세요 +안녕하세요\\ | 안녕하세요\ +"안녕하세요 | 안녕하세요 +"안녕하세요\" | 안녕하세요" +"안녕'하세요" | 안녕'하세요 +'안녕하세요 | 안녕하세요 +'안녕하세요\' | 안녕하세요\ +안녕$1x | 안녕x +안녕$.x | 안녕$.x +안녕$pwd. | 안녕. +안녕$PWD | 안녕/home +안녕\$PWD | 안녕$PWD +안녕\\$PWD | 안녕\/home +안녕\${} | 안녕${} +안녕\${}xx | 안녕${}xx +안녕${} | 안녕 +안녕${}xx | 안녕xx +안녕${hi} | 안녕 +안녕${hi}xx | 안녕xx +안녕${PWD} | 안녕/home +안녕${.} | error +안녕${XXX:-000}xx | 안녕000xx +안녕${PWD:-000}xx | 안녕/homexx +안녕${XXX:-$PWD}xx | 안녕/homexx +안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +안녕${XXX:YYY} | error +안녕${XXX:+${PWD}}xx | 안녕xx +안녕${PWD:+${XXX}}xx | 안녕xx +안녕${PWD:+${SHELL}}xx | 안녕bashxx +안녕${XXX:+000}xx | 안녕xx +안녕${PWD:+000}xx | 안녕000xx +'안녕${XX}' | 안녕${XX} +"안녕${PWD}" | 안녕/home +"안녕'$PWD'" | 안녕'/home' +'"안녕"' | "안녕" +안녕\$PWD | 안녕$PWD +"안녕\$PWD" | 안녕$PWD +'안녕\$PWD' | 안녕\$PWD +안녕${PWD | error +안녕${PWD:=000}xx | error +안녕${PWD:+${PWD}:}xx | 안녕/home:xx +안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +$KOREAN | 한국어 +안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go new file mode 100644 index 00000000..270e3a4f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -0,0 +1,215 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). 
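+//
+// The instruction-name-to-handler mapping lives in evaluateTable (populated
+// in init() below); dispatch() performs any environment replacement on the
+// arguments and then looks the command up there.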
+package dockerfile + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. +var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + command.Env: env, + command.Label: label, + command.Maintainer: maintainer, + command.Add: add, + command.Copy: dispatchCopy, // copy() is a go builtin + command.From: from, + command.Onbuild: onbuild, + command.Workdir: workdir, + command.Run: run, + command.Cmd: cmd, + command.Entrypoint: entrypoint, + command.Expose: expose, + command.Volume: volume, + command.User: user, + command.StopSignal: stopSignal, + command.Arg: arg, + } +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(stepN int, ast *parser.Node) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + attrs := ast.Attributes + original := ast.Original + flags := ast.Flags + strList := []string{} + msg := fmt.Sprintf("Step %d : %s", stepN+1, upperCasedCmd) + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + if cmd == "onbuild" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + strList = append(strList, ast.Value) + msg += " " + ast.Value + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + } + + // count the number of nodes that we are going to traverse first + // so we can pre-create the argument and message array. 
This speeds up the + // allocation of those list a lot when they have a lot of arguments + cursor := ast + var n int + for cursor.Next != nil { + cursor = cursor.Next + n++ + } + msgList := make([]string, n) + + var i int + // Append the build-time args to config-environment. + // This allows builder config to override the variables, making the behavior similar to + // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build + // context. But `ENV foo $foo` will use the value from build context if one + // isn't already been defined by a previous ENV primitive. + // Note, we get this behavior because we know that ProcessWord() will + // stop on the first occurrence of a variable name and not notice + // a subsequent one. So, putting the buildArgs list after the Config.Env + // list, in 'envs', is safe. + envs := b.runConfig.Env + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if replaceEnvAllowed[cmd] { + var err error + var words []string + + if allowWordExpansion[cmd] { + words, err = ProcessWords(str, envs) + if err != nil { + return err + } + strList = append(strList, words...) + } else { + str, err = ProcessWord(str, envs) + if err != nil { + return err + } + strList = append(strList, str) + } + } else { + strList = append(strList, str) + } + msgList[i] = ast.Value + i++ + } + + msg += " " + strings.Join(msgList, " ") + fmt.Fprintln(b.Stdout, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + b.flags = NewBFlags() + b.flags.Args = flags + return f(b, strList, attrs, original) + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} + +// platformSupports is a short-term function to give users a quality error +// message if a Dockerfile uses a command not supported on the platform. +func platformSupports(command string) error { + if runtime.GOOS != "windows" { + return nil + } + switch command { + case "expose", "user", "stopsignal", "arg": + return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go new file mode 100644 index 00000000..a9be9fdd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -0,0 +1,662 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. 
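+// Among other things: commit and cache probing, ADD/COPY handling and remote
+// downloads, container create/run, and reading the Dockerfile from the context.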
+ +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/strslice" +) + +func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { + if b.disableCommit { + return nil + } + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.runConfig.Image = b.image + + if id == "" { + cmd := b.runConfig.Cmd + if runtime.GOOS != "windows" { + b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", "#(nop) " + comment} + } else { + b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S /C", "REM (nop) " + comment} + } + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } else if hit { + return nil + } + id, err = b.create() + if err != nil { + return err + } + } + + // Note: Actually copy the struct + autoConfig := *b.runConfig + autoConfig.Cmd = autoCmd + + commitCfg := &types.ContainerCommitConfig{ + Author: b.maintainer, + Pause: true, + Config: &autoConfig, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + b.image = imageID + return nil +} + +type copyInfo struct { + builder.FileInfo + decompress bool +} + +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + b.runConfig.Image = b.image + + var infos []copyInfo + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). Don't actually do + // the copy until we've looked at all src files + var err error + for _, orig := range args[0 : len(args)-1] { + var fi builder.FileInfo + decompress := allowLocalDecompression + if urlutil.IsURL(orig) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + fi, err = b.download(orig) + if err != nil { + return err + } + defer os.RemoveAll(filepath.Dir(fi.Path())) + decompress = false + infos = append(infos, copyInfo{fi, decompress}) + continue + } + // not a URL + subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) + if err != nil { + return err + } + + infos = append(infos, subInfos...) 
+	}
+
+	if len(infos) == 0 {
+		return fmt.Errorf("No source files were specified")
+	}
+	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
+		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
+	}
+
+	// For backwards compat, if there's just one info then use it as the
+	// cache look-up string, otherwise hash 'em all into one
+	var srcHash string
+	var origPaths string
+
+	if len(infos) == 1 {
+		fi := infos[0].FileInfo
+		origPaths = fi.Name()
+		if hfi, ok := fi.(builder.Hashed); ok {
+			srcHash = hfi.Hash()
+		}
+	} else {
+		var hashs []string
+		var origs []string
+		for _, info := range infos {
+			fi := info.FileInfo
+			origs = append(origs, fi.Name())
+			if hfi, ok := fi.(builder.Hashed); ok {
+				hashs = append(hashs, hfi.Hash())
+			}
+		}
+		hasher := sha256.New()
+		hasher.Write([]byte(strings.Join(hashs, ",")))
+		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
+		origPaths = strings.Join(origs, " ")
+	}
+
+	cmd := b.runConfig.Cmd
+	if runtime.GOOS != "windows" {
+		b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
+	} else {
+		b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)}
+	}
+	defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
+
+	if hit, err := b.probeCache(); err != nil {
+		return err
+	} else if hit {
+		return nil
+	}
+
+	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
+	if err != nil {
+		return err
+	}
+	b.tmpContainers[container.ID] = struct{}{}
+
+	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)
+
+	// Twiddle the destination when it's a relative path - meaning, make it
+	// relative to the working directory (WORKDIR)
+	if !system.IsAbs(dest) {
+		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
+		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)
+
+		// Make sure we preserve any trailing slash
+		if hasSlash {
+			dest += string(os.PathSeparator)
+		}
+	}
+
+	for _, info := range infos {
+		if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
+			return err
+		}
+	}
+
+	return b.commit(container.ID, cmd, comment)
+}
+
+func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
+	// get filename from URL
+	u, err := url.Parse(srcURL)
+	if err != nil {
+		return
+	}
+	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
+	if strings.HasSuffix(path, string(os.PathSeparator)) {
+		path = path[:len(path)-1]
+	}
+	parts := strings.Split(path, string(os.PathSeparator))
+	filename := parts[len(parts)-1]
+	if filename == "" {
+		err = fmt.Errorf("cannot determine filename from url: %s", u)
+		return
+	}
+
+	// Initiate the download
+	resp, err := httputils.Download(srcURL)
+	if err != nil {
+		return
+	}
+
+	// Prepare file in a tmp dir
+	tmpDir, err := ioutils.TempDir("", "docker-remote")
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(tmpDir)
+		}
+	}()
+	tmpFileName := filepath.Join(tmpDir, filename)
+	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+	if err != nil {
+		return
+	}
+
+	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
+	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
+	progressReader := progress.NewProgressReader(resp.Body, progressOutput,
resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + fmt.Fprintln(b.Stdout) + // ignoring error because the file was already opened successfully + tmpFileSt, err := tmpFile.Stat() + if err != nil { + return + } + tmpFile.Close() + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + // Calc the checksum, even if we're using the cache + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return + } + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return + } + hash := tarSum.Sum(nil) + r.Close() + return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil +} + +func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + var copyInfos []copyInfo + if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + if info.Name() == "" { + // Why are we doing this check? + return nil + } + if match, _ := filepath.Match(origPath, path); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil + } + + // Must be a dir or a file + + statPath, fi, err := b.context.Stat(origPath) + if err != nil { + return nil, err + } + + copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} + + hfi, handleHash := fi.(builder.Hashed) + if !handleHash { + return copyInfos, nil + } + + // Deal with the single file case + if !fi.IsDir() { + hfi.SetHash("file:" + hfi.Hash()) + return copyInfos, nil + } + // Must be a dir + var subfiles []string + err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + // we already checked handleHash above + subfiles = append(subfiles, info.(builder.Hashed).Hash()) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) + + return copyInfos, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + +func (b *Builder) processImageFrom(img builder.Image) error { + if img != nil { + b.image = img.ImageID() + + if img.RunConfig() != nil { + b.runConfig = img.RunConfig() + } + } + + // Check to see if we have a default PATH, note that windows won't + // have one as its set by HCS + if system.DefaultPathEnv != "" { + // Convert the slice of strings that represent the current list + // of env vars into a map so we can see if PATH is already set. + // If its not set then go ahead and give it our default value + configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) + if _, ok := configEnv["PATH"]; !ok { + b.runConfig.Env = append(b.runConfig.Env, + "PATH="+system.DefaultPathEnv) + } + } + + if img == nil { + // Typically this means they used "FROM scratch" + return nil + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.runConfig.OnBuild + b.runConfig.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for i, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + if err := b.dispatch(i, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks if `b.docker` implements builder.ImageCache and image-caching +// is enabled (`b.UseCache`). +// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`. +// If an image is found, probeCache returns `(true, nil)`. +// If no image is found, it returns `(false, nil)`. +// If there is any error, it returns `(false, err)`. +func (b *Builder) probeCache() (bool, error) { + c, ok := b.docker.(builder.ImageCache) + if !ok || b.options.NoCache || b.cacheBusted { + return false, nil + } + cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig) + if err != nil { + return false, err + } + if len(cache) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) + b.cacheBusted = true + return false, nil + } + + fmt.Fprintf(b.Stdout, " ---> Using cache\n") + logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) + b.image = string(cache) + + return true, nil +} + +func (b *Builder) create() (string, error) { + if b.image == "" && !b.noBaseImage { + return "", fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.runConfig.Image = b.image + + resources := container.Resources{ + CgroupParent: b.options.CgroupParent, + CPUShares: b.options.CPUShares, + CPUPeriod: b.options.CPUPeriod, + CPUQuota: b.options.CPUQuota, + CpusetCpus: b.options.CPUSetCPUs, + CpusetMems: b.options.CPUSetMems, + Memory: b.options.Memory, + MemorySwap: b.options.MemorySwap, + Ulimits: b.options.Ulimits, + } + + // TODO: why not embed a hostconfig in builder? 
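+	// Resource limits and isolation settings for the build container come
+	// straight from the client-supplied build options.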
+ hostConfig := &container.HostConfig{ + Isolation: b.options.Isolation, + ShmSize: b.options.ShmSize, + Resources: resources, + } + + config := *b.runConfig + + // Create the container + c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: b.runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return "", err + } + for _, warning := range c.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { + return "", err + } + + return c.ID, nil +} + +func (b *Builder) run(cID string) (err error) { + errCh := make(chan error) + go func() { + errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) + }() + + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-b.cancelled: + logrus.Debugln("Build cancelled, killing and removing container:", cID) + b.docker.ContainerKill(cID, 0) + b.removeContainer(cID) + case <-finished: + } + }() + + if err := b.docker.ContainerStart(cID, nil); err != nil { + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + return err + } + + if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), + Code: ret, + } + } + + return nil +} + +func (b *Builder) removeContainer(c string) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := b.docker.ContainerRm(c, rmConfig); err != nil { + fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) + return err + } + return nil +} + +func (b *Builder) clearTmp() { + for c := range b.tmpContainers { + if err := b.removeContainer(c); err != nil { + return + } + delete(b.tmpContainers, c) + fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) + } +} + +// readDockerfile reads a Dockerfile from the current context. +func (b *Builder) readDockerfile() error { + // If no -f was specified then look for 'Dockerfile'. If we can't find + // that then look for 'dockerfile'. If neither are found then default + // back to 'Dockerfile' and use that in the error message. 
+ if b.options.Dockerfile == "" { + b.options.Dockerfile = builder.DefaultDockerfileName + if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { + lowercase := strings.ToLower(b.options.Dockerfile) + if _, _, err := b.context.Stat(lowercase); err == nil { + b.options.Dockerfile = lowercase + } + } + } + + f, err := b.context.Open(b.options.Dockerfile) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) + } + return err + } + if f, ok := f.(*os.File); ok { + // ignoring error because Open already succeeded + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) + } + } + b.dockerfile, err = parser.Parse(f) + f.Close() + if err != nil { + return err + } + + // After the Dockerfile has been parsed, we need to check the .dockerignore + // file for either "Dockerfile" or ".dockerignore", and if either are + // present then erase them from the build context. These files should never + // have been sent from the client but we did send them to make sure that + // we had the Dockerfile to actually parse, and then we also need the + // .dockerignore file to know whether either file should be removed. + // Note that this assumes the Dockerfile has been read into memory and + // is now safe to be removed. + if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { + dockerIgnore.Process([]string{b.options.Dockerfile}) + } + return nil +} + +// determine if build arg is part of built-in args or user +// defined args in Dockerfile at any point in time. +func (b *Builder) isBuildArgAllowed(arg string) bool { + if _, ok := BuiltinAllowedBuildArgs[arg]; ok { + return true + } + if _, ok := b.allowedBuildArgs[arg]; ok { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 00000000..1d7ece43 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,331 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + _, child, err := ParseLine(rest) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. 
+// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). +func parseWords(rest string) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(rest); pos++ { + if pos != len(rest) { + ch = rune(rest[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == '\\' { + if pos+1 == len(rest) { + continue // just skip \ at end + } + // If we're not quoted and we see a \, then always just + // add \ plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // \ is special except for ' quotes - can't escape anything for ' + if ch == '\\' && quote != '\'' { + if pos+1 == len(rest) { + phase = inWord + continue // just skip \ at end + } + pos++ + nextCh := rune(rest[pos]) + word += string(ch) + ch = nextCh + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest) + if len(words) == 0 { + return nil, nil, nil + } + + var rootnode *Node + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := tokenWhitespace.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf(key + " must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } + + return rootnode, nil, nil +} + +func parseEnv(rest string) (*Node, map[string]bool, error) { + return parseNameVal(rest, "ENV") +} + +func parseLabel(rest string) (*Node, map[string]bool, error) { + return parseNameVal(rest, "LABEL") +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. 
+// Note that this is a stricter format than the old format of assignment,
+// allowed by parseNameVal(), in a way that this only allows assignment of the
+// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above.
+// In addition, a keyword definition alone is of the form `keyword` like `name1`
+// above. And the assignments `name2=` and `name3=""` are equivalent and
+// assign an empty value to the respective keywords.
+func parseNameOrNameVal(rest string) (*Node, map[string]bool, error) {
+	words := parseWords(rest)
+	if len(words) == 0 {
+		return nil, nil, nil
+	}
+
+	var (
+		rootnode *Node
+		prevNode *Node
+	)
+	for i, word := range words {
+		node := &Node{}
+		node.Value = word
+		if i == 0 {
+			rootnode = node
+		} else {
+			prevNode.Next = node
+		}
+		prevNode = node
+	}
+
+	return rootnode, nil, nil
+}
+
+// parses a whitespace-delimited set of arguments. The result is effectively a
+// linked list of string arguments.
+func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
+	node := &Node{}
+	rootnode := node
+	prevnode := node
+	for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
+		prevnode = node
+		node.Value = str
+		node.Next = &Node{}
+		node = node.Next
+	}
+
+	// XXX to get around regexp.Split *always* providing an empty string at the
+	// end due to how our loop is constructed, nil out the last node in the
+	// chain.
+	prevnode.Next = nil
+
+	return rootnode, nil, nil
+}
+
+// parseString just wraps the string in quotes and returns a working node.
+func parseString(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+	n := &Node{}
+	n.Value = rest
+	return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string) (*Node, map[string]bool, error) {
+	rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
+	if !strings.HasPrefix(rest, "[") {
+		return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
+	}
+
+	var myJSON []interface{}
+	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
+		return nil, nil, err
+	}
+
+	var top, prev *Node
+	for _, str := range myJSON {
+		s, ok := str.(string)
+		if !ok {
+			return nil, nil, errDockerfileNotStringArray
+		}
+
+		node := &Node{Value: s}
+		if prev == nil {
+			top = node
+		} else {
+			prev.Next = node
+		}
+		prev = node
+	}
+
+	return top, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, quotes the result and returns a single
+// node.
+func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
+	node, attrs, err := parseJSON(rest)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	node = &Node{}
+	node.Value = rest
+	return node, nil, nil
+}
+
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace-delimited
+// string.
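To make the maybe-JSON decision concrete before the implementation: an argument that decodes as a JSON array of strings becomes an exec-form list, anything else is kept verbatim. The sketch below folds the real code's errDockerfileNotStringArray handling into a plain fallback; `classify` is a hypothetical name:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// classify mimics the maybe-JSON dispatch: a JSON array of strings is
// an exec-form list; anything else is a single shell-form value. (The
// vendored parser additionally treats "valid JSON but not all strings"
// as a hard error rather than falling back.)
func classify(rest string) ([]string, bool) {
	trimmed := strings.TrimLeft(rest, " \t")
	if strings.HasPrefix(trimmed, "[") {
		var arr []string
		if err := json.NewDecoder(strings.NewReader(trimmed)).Decode(&arr); err == nil {
			return arr, true // exec form
		}
	}
	return []string{rest}, false // shell form
}

func main() {
	fmt.Println(classify(`["echo", "hi"]`)) // [echo hi] true
	fmt.Println(classify(`echo hi`))        // [echo hi] false
}
```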
+func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
+	node, attrs, err := parseJSON(rest)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	return parseStringsWhitespaceDelimited(rest)
+}
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
new file mode 100644
index 00000000..ece601a9
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
@@ -0,0 +1,161 @@
+// Package parser implements a parser and parse tree dumper for Dockerfiles.
+package parser
+
+import (
+	"bufio"
+	"io"
+	"regexp"
+	"strings"
+	"unicode"
+
+	"github.com/docker/docker/builder/dockerfile/command"
+)
+
+// Node is a structure used to represent a parse tree.
+//
+// In the node there are three fields, Value, Next, and Children. Value is the
+// current token's string value. Next is always the next non-child token, and
+// children contains all the children. Here's an example:
+//
+// (value next (child child-next child-next-next) next-next)
+//
+// This data structure is frankly pretty lousy for handling complex languages,
+// but lucky for us the Dockerfile isn't very complicated. This structure
+// works a little more effectively than a "proper" parse tree for our needs.
+//
+type Node struct {
+	Value      string          // actual content
+	Next       *Node           // the next item in the current sexp
+	Children   []*Node         // the children of this sexp
+	Attributes map[string]bool // special attributes for this node
+	Original   string          // original line used before parsing
+	Flags      []string        // only top Node should have this set
+	StartLine  int             // the line in the original dockerfile where the node begins
+	EndLine    int             // the line in the original dockerfile where the node ends
+}
+
+var (
+	dispatch              map[string]func(string) (*Node, map[string]bool, error)
+	tokenWhitespace       = regexp.MustCompile(`[\t\v\f\r ]+`)
+	tokenLineContinuation = regexp.MustCompile(`\\[ \t]*$`)
+	tokenComment          = regexp.MustCompile(`^#.*$`)
+)
+
+func init() {
+	// Dispatch Table. see line_parsers.go for the parse functions.
+	// The command is parsed and mapped to the line parser. The line parser
+	// receives the arguments but not the command, and returns an AST after
+	// reformulating the arguments according to the rules in the parser
+	// functions. Errors are propagated up by Parse() and the resulting AST can
+	// be incorporated directly into the existing AST as a next.
+	dispatch = map[string]func(string) (*Node, map[string]bool, error){
+		command.User:       parseString,
+		command.Onbuild:    parseSubCommand,
+		command.Workdir:    parseString,
+		command.Env:        parseEnv,
+		command.Label:      parseLabel,
+		command.Maintainer: parseString,
+		command.From:       parseString,
+		command.Add:        parseMaybeJSONToList,
+		command.Copy:       parseMaybeJSONToList,
+		command.Run:        parseMaybeJSON,
+		command.Cmd:        parseMaybeJSON,
+		command.Entrypoint: parseMaybeJSON,
+		command.Expose:     parseStringsWhitespaceDelimited,
+		command.Volume:     parseMaybeJSONToList,
+		command.StopSignal: parseString,
+		command.Arg:        parseNameOrNameVal,
+	}
+}
+
+// ParseLine parses a line and returns the remainder.
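ParseLine strips a trailing line continuation and hands the remainder back so that Parse can keep appending lines until the statement is complete. A self-contained sketch of that accumulation step, assuming the same `\\[ \t]*$` continuation rule as tokenLineContinuation:

```go
package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

// Same continuation rule as tokenLineContinuation above: a trailing
// backslash, optionally followed by spaces or tabs.
var continuation = regexp.MustCompile(`\\[ \t]*$`)

// joinContinuations folds backslash-continued lines into single logical
// statements, mirroring the accumulation loop in Parse (comments and
// blank lines are simply skipped here).
func joinContinuations(dockerfile string) []string {
	var statements []string
	var pending string
	sc := bufio.NewScanner(strings.NewReader(dockerfile))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if continuation.MatchString(line) {
			pending += continuation.ReplaceAllString(line, "")
			continue
		}
		statements = append(statements, pending+line)
		pending = ""
	}
	return statements
}

func main() {
	df := "RUN apt-get update && \\\n    apt-get install -y curl\n# comment\nCMD [\"sh\"]\n"
	for _, s := range joinContinuations(df) {
		fmt.Println(s)
	}
	// RUN apt-get update && apt-get install -y curl
	// CMD ["sh"]
}
```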
+func ParseLine(line string) (string, *Node, error) { + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if tokenLineContinuation.MatchString(line) { + line = tokenLineContinuation.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, flags, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args) + if err != nil { + return "", nil, err + } + + node.Next = sexp + node.Attributes = attrs + node.Original = line + node.Flags = flags + + return "", node, nil +} + +// Parse is the main parse routine. +// It handles an io.ReadWriteCloser and returns the root of the AST. +func Parse(rwc io.Reader) (*Node, error) { + currentLine := 0 + root := &Node{} + root.StartLine = -1 + scanner := bufio.NewScanner(rwc) + + for scanner.Scan() { + scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) + currentLine++ + line, child, err := ParseLine(scannedLine) + if err != nil { + return nil, err + } + startLine := currentLine + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + currentLine++ + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = ParseLine(line + newline) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + if child == nil && line != "" { + _, child, err = ParseLine(line) + if err != nil { + return nil, err + } + } + } + + if child != nil { + // Update the line information for the current child. + child.StartLine = startLine + child.EndLine = currentLine + // Update the line information for the root. The starting line of the root is always the + // starting line of the first child and the ending line is the ending line of the last child. + if root.StartLine < 0 { + root.StartLine = currentLine + } + root.EndLine = currentLine + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go new file mode 100644 index 00000000..b21eb62a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go @@ -0,0 +1,176 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. 
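extractBuilderFlags, called from splitCommand below, peels leading `--` tokens off the argument string before the arguments are dispatched. A simplified, quote-unaware analogue under those assumptions; `splitFlags` and the `--retries` flag are illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// splitFlags peels leading --flag tokens off an instruction's argument
// string: a reduction of extractBuilderFlags without quote or backslash
// handling. A bare "--" ends flag parsing, as it does in the real code.
func splitFlags(args string) (flags []string, rest string) {
	fields := strings.Fields(args)
	for i, f := range fields {
		if f == "--" {
			return flags, strings.Join(fields[i+1:], " ")
		}
		if !strings.HasPrefix(f, "--") {
			return flags, strings.Join(fields[i:], " ")
		}
		flags = append(flags, f)
	}
	return flags, ""
}

func main() {
	flags, rest := splitFlags(`--retries=3 CMD curl -f http://localhost/`)
	fmt.Println(flags, "|", rest) // [--retries=3] | CMD curl -f http://localhost/
}
```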
+func splitCommand(line string) (string, []string, string, error) {
+	var args string
+	var flags []string
+
+	// Make sure we get the same results irrespective of leading/trailing spaces
+	cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
+	cmd := strings.ToLower(cmdline[0])
+
+	if len(cmdline) == 2 {
+		var err error
+		args, flags, err = extractBuilderFlags(cmdline[1])
+		if err != nil {
+			return "", nil, "", err
+		}
+	}
+
+	return cmd, flags, strings.TrimSpace(args), nil
+}
+
+// covers comments and empty lines. Lines should be trimmed before passing to
+// this function.
+func stripComments(line string) string {
+	// string is already trimmed at this point
+	if tokenComment.MatchString(line) {
+		return tokenComment.ReplaceAllString(line, "")
+	}
+
+	return line
+}
+
+func extractBuilderFlags(line string) (string, []string, error) {
+	// Parses the BuilderFlags and returns the remaining part of the line
+
+	const (
+		inSpaces = iota // looking for start of a word
+		inWord
+		inQuote
+	)
+
+	words := []string{}
+	phase := inSpaces
+	word := ""
+	quote := '\000'
+	blankOK := false
+	var ch rune
+
+	for pos := 0; pos <= len(line); pos++ {
+		if pos != len(line) {
+			ch = rune(line[pos])
+		}
+
+		if phase == inSpaces { // Looking for start of word
+			if pos == len(line) { // end of input
+				break
+			}
+			if unicode.IsSpace(ch) { // skip spaces
+				continue
+			}
+
+			// Only keep going if the next word starts with --
+			if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
+				return line[pos:], words, nil
+			}
+
+			phase = inWord // found something with "--", fall through
+		}
+		if (phase == inWord || phase == inQuote) && (pos == len(line)) {
+			if word != "--" && (blankOK || len(word) > 0) {
+				words = append(words, word)
+			}
+			break
+		}
+		if phase == inWord {
+			if unicode.IsSpace(ch) {
+				phase = inSpaces
+				if word == "--" {
+					return line[pos:], words, nil
+				}
+				if blankOK || len(word) > 0 {
+					words = append(words, word)
+				}
+				word = ""
+				blankOK = false
+				continue
+			}
+			if ch == '\'' || ch == '"' {
+				quote = ch
+				blankOK = true
+				phase = inQuote
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(line) {
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(line[pos])
+			}
+			word += string(ch)
+			continue
+		}
+		if phase == inQuote {
+			if ch == quote {
+				phase = inWord
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(line) {
+					phase = inWord
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(line[pos])
+			}
+			word += string(ch)
+		}
+	}
+
+	return "", words, nil
+}
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go
new file mode 100644
index 00000000..c7142667
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go
@@ -0,0 +1,314 @@
+package dockerfile
+
+// This will take a single word and an array of env variables and
+// process all quotes (" and ') as well as $xxx and ${xxx} env variable
+// tokens. Tries to mimic bash shell process.
+// It doesn't support all flavors of ${xx:...} formats but new ones can
+// be added by adding code to the "special ${} format processing" section
+
+import (
+	"fmt"
+	"strings"
+	"text/scanner"
+	"unicode"
+)
+
+type shellWord struct {
+	word    string
+	scanner scanner.Scanner
+	envs    []string
+	pos     int
+}
+
+// ProcessWord will use the 'env' list of environment variables,
+// and replace any env var references in 'word'.
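As a rough picture of what ProcessWord does, the sketch below resolves $VAR and ${VAR} references against a docker-style KEY=value environment list using the standard library's os.Expand; it deliberately omits the quoting rules and the ${VAR:+...}/${VAR:-...} modifiers the real implementation handles:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// expand resolves $VAR and ${VAR} references against a []string of
// "KEY=value" entries: a much-reduced analogue of ProcessWord, with no
// quote handling and no substitution modifiers.
func expand(word string, env []string) string {
	return os.Expand(word, func(name string) string {
		for _, kv := range env {
			if i := strings.IndexByte(kv, '='); i > 0 && kv[:i] == name {
				return kv[i+1:]
			}
		}
		return "" // unset variables expand to empty, as in the shell
	})
}

func main() {
	env := []string{"APP_HOME=/opt/app"}
	fmt.Println(expand("WORKDIR ${APP_HOME}/bin", env)) // WORKDIR /opt/app/bin
}
```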
+func ProcessWord(word string, env []string) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + sw.scanner.Init(strings.NewReader(word)) + word, _, err := sw.process() + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func ProcessWords(word string, env []string) ([]string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + sw.scanner.Init(strings.NewReader(word)) + _, words, err := sw.process() + return words, err +} + +func (sw *shellWord) process() (string, []string, error) { + return sw.processStopOn(scanner.EOF) +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result string + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result += tmp + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == '\\' { + // '\' escapes, except end of line + + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result += string(ch) + } + } + + return result, words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + if ch == '\'' || ch == scanner.EOF { + break + } + result += string(ch) + } + + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can 
escape " with a \ + var result string + + sw.scanner.Next() + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if ch == '"' { + sw.scanner.Next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.scanner.Next() + if ch == '\\' { + chNext := sw.scanner.Peek() + + if chNext == scanner.EOF { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.scanner.Next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + ch := sw.scanner.Peek() + if ch == '{' { + sw.scanner.Next() + name := sw.processName() + ch = sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + if name != env[:i] { + continue + } + return env[i+1:] + } + return "" +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support.go b/vendor/github.com/docker/docker/builder/dockerfile/support.go new file mode 100644 index 00000000..38897b2c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/support.go @@ -0,0 +1,16 @@ +package dockerfile + +import "strings" + +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/wordsTest b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest new file mode 100644 index 00000000..fa916c67 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest @@ -0,0 +1,25 @@ +hello | hello 
+hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | hello there +hello\" there | hello",there diff --git a/vendor/github.com/docker/docker/builder/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore.go new file mode 100644 index 00000000..2990770a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore.go @@ -0,0 +1,47 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" +) + +// DockerIgnoreContext wraps a ModifiableContext to add a method +// for handling the .dockerignore file at the root of the context. +type DockerIgnoreContext struct { + ModifiableContext +} + +// Process reads the .dockerignore file at the root of the embedded context. +// If .dockerignore does not exist in the context, then nil is returned. +// +// It can take a list of files to be removed after .dockerignore is removed. +// This is used for server-side implementations of builders that need to send +// the .dockerignore file as well as the special files specified in filesToRemove, +// but expect them to be excluded from the context after they were processed. +// +// For example, server-side Dockerfile builders are expected to pass in the name +// of the Dockerfile to be removed after it was parsed. +// +// TODO: Don't require a ModifiableContext (use Context instead) and don't remove +// files, instead handle a list of files to be excluded from the context. +func (c DockerIgnoreContext) Process(filesToRemove []string) error { + f, err := c.Open(".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + excludes, _ := dockerignore.ReadAll(f) + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) + for _, fileToRemove := range filesToRemove { + rm, _ := fileutils.Matches(fileToRemove, excludes) + if rm { + c.Remove(fileToRemove) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go new file mode 100644 index 00000000..1fed3199 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go @@ -0,0 +1,35 @@ +package dockerignore + +import ( + "bufio" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. 
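Assuming the vendored import path above resolves on the GOPATH, ReadAll can be exercised directly; blank lines are skipped and each pattern is path-cleaned, as the comment describes:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/docker/docker/builder/dockerignore"
)

func main() {
	// A literal .dockerignore body: the blank line is dropped and each
	// pattern is cleaned, so "./build" comes back as "build".
	body := "node_modules/\n\n./build\n*.log\n"
	excludes, err := dockerignore.ReadAll(ioutil.NopCloser(strings.NewReader(body)))
	if err != nil {
		panic(err)
	}
	fmt.Println(excludes) // [node_modules build *.log]
}
```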
+func ReadAll(reader io.ReadCloser) ([]string, error) { + if reader == nil { + return nil, nil + } + defer reader.Close() + scanner := bufio.NewScanner(reader) + var excludes []string + + for scanner.Scan() { + pattern := strings.TrimSpace(scanner.Text()) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/vendor/github.com/docker/docker/builder/git.go b/vendor/github.com/docker/docker/builder/git.go new file mode 100644 index 00000000..74df2446 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/git.go @@ -0,0 +1,28 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/gitutils" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. +func MakeGitContext(gitURL string) (ModifiableContext, error) { + root, err := gitutils.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + // TODO: print errors? + c.Close() + os.RemoveAll(root) + }() + return MakeTarSumContext(c) +} diff --git a/vendor/github.com/docker/docker/builder/remote.go b/vendor/github.com/docker/docker/builder/remote.go new file mode 100644 index 00000000..12f34c7b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remote.go @@ -0,0 +1,152 @@ +package builder + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "regexp" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/urlutil" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// MakeRemoteContext downloads a context from remoteURL and returns it. +// +// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of +// maxPreambleLength bytes from the body to help detecting the MIME type. +// Look at acceptableRemoteMIME for more details. +// +// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected +// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). +// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. 
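The handler-selection loop inside MakeRemoteContext tries the detected content type first and then the empty string as a catch-all for raw (possibly compressed) tar streams. A tiny sketch of that fallback order, with plain strings standing in for the handler functions:

```go
package main

import "fmt"

// pickHandler mirrors the fallback loop in MakeRemoteContext: try the
// detected content type first, then "" as the catch-all entry.
func pickHandler(handlers map[string]string, contentType string) (string, bool) {
	for _, ct := range []string{contentType, ""} {
		if h, ok := handlers[ct]; ok {
			return h, true
		}
	}
	return "", false
}

func main() {
	handlers := map[string]string{
		"text/plain": "wrap single Dockerfile into a tar",
		"":           "treat body as a tar stream",
	}
	fmt.Println(pickHandler(handlers, "application/x-tar")) // falls through to ""
	fmt.Println(pickHandler(handlers, "text/plain"))        // exact match
}
```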
+func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { + f, err := httputils.Download(remoteURL) + if err != nil { + return nil, fmt.Errorf("Error downloading remote context %s: %v", remoteURL, err) + } + defer f.Body.Close() + + var contextReader io.ReadCloser + if contentTypeHandlers != nil { + contentType := f.Header.Get("Content-Type") + clen := f.ContentLength + + contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) + if err != nil { + return nil, fmt.Errorf("Error detecting content type for remote %s: %v", remoteURL, err) + } + defer contextReader.Close() + + // This loop tries to find a content-type handler for the detected content-type. + // If it could not find one from the caller-supplied map, it tries the empty content-type `""` + // which is interpreted as a fallback handler (usually used for raw tar contexts). + for _, ct := range []string{contentType, ""} { + if fn, ok := contentTypeHandlers[ct]; ok { + defer contextReader.Close() + if contextReader, err = fn(contextReader); err != nil { + return nil, err + } + break + } + } + } + + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. + return MakeTarSumContext(contextReader) +} + +// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used +// irrespective of user input. +// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). +func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { + switch { + case remoteURL == "": + context, err = MakeTarSumContext(r) + case urlutil.IsGitURL(remoteURL): + context, err = MakeGitContext(remoteURL) + case urlutil.IsURL(remoteURL): + context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller + // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. + dockerfileName = DefaultDockerfileName + + // TODO: return a context without tarsum + return archive.Generate(dockerfileName, string(dockerfile)) + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return createProgressReader(rc), nil + }, + }) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. 
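The preamble trick used by inspectResponse, reading a bounded prefix to sniff the type and then stitching it back onto the stream, can be shown with the standard library's http.DetectContentType standing in for the vendored httputils (a sketch, not the vendored code):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

// sniff reads a bounded preamble to classify the stream, then stitches
// the consumed bytes back in front of the remainder with MultiReader so
// callers can still read the whole body, as inspectResponse does below.
func sniff(r io.Reader) (string, io.Reader, error) {
	preamble := make([]byte, 100) // cf. maxPreambleLength
	n, err := r.Read(preamble)
	if err != nil && err != io.EOF {
		return "", nil, err
	}
	body := io.MultiReader(bytes.NewReader(preamble[:n]), r)
	return http.DetectContentType(preamble[:n]), body, nil
}

func main() {
	ct, body, err := sniff(strings.NewReader("FROM alpine\nRUN true\n"))
	if err != nil {
		panic(err)
	}
	rest, _ := ioutil.ReadAll(body)
	fmt.Println(ct)        // text/plain; charset=utf-8
	fmt.Println(len(rest)) // 21: the full body is still readable
}
```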
+func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("Empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble) + bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { + contentType, _, err = httputils.DetectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/vendor/github.com/docker/docker/builder/tarsum.go b/vendor/github.com/docker/docker/builder/tarsum.go new file mode 100644 index 00000000..201e3ef7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/tarsum.go @@ -0,0 +1,158 @@ +package builder + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/tarsum" +) + +type tarSumContext struct { + root string + sums tarsum.FileInfoSums +} + +func (c *tarSumContext) Close() error { + return os.RemoveAll(c.root) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return nil, err + } + r, err := os.Open(fullpath) + if err != nil { + return nil, convertPathError(err, cleanpath) + } + return r, nil +} + +func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return "", nil, err + } + + st, err := os.Lstat(fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. + sum := path + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(rel); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} + return rel, fi, nil +} + +// MakeTarSumContext returns a build Context from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. 
+// +// Closing tarStream has to be done by the caller. +func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + tsc := &tarSumContext{root: root} + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + if err := chrootarchive.Untar(sum, root, nil); err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + + return tsc, nil +} + +func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { + cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) + if err != nil { + return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) + } + _, err = os.Lstat(fullpath) + if err != nil { + return "", "", convertPathError(err, path) + } + return +} + +func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { + root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) + return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return err + } + if rel == "." { + return nil + } + + sum := rel + if tsInfo := c.sums.GetFile(rel); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} + if err := walkFn(rel, fi, nil); err != nil { + return err + } + return nil + }) +} + +func (c *tarSumContext) Remove(path string) error { + _, fullpath, err := c.normalize(path) + if err != nil { + return err + } + return os.RemoveAll(fullpath) +} diff --git a/vendor/github.com/docker/docker/cli/cli.go b/vendor/github.com/docker/docker/cli/cli.go new file mode 100644 index 00000000..8e559fc3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cli.go @@ -0,0 +1,200 @@ +package cli + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" + "strings" + + flag "github.com/docker/docker/pkg/mflag" +) + +// Cli represents a command line interface. +type Cli struct { + Stderr io.Writer + handlers []Handler + Usage func() +} + +// Handler holds the different commands Cli will call +// It should have methods with names starting with `Cmd` like: +// func (h myHandler) CmdFoo(args ...string) error +type Handler interface{} + +// Initializer can be optionally implemented by a Handler to +// initialize before each call to one of its commands. +type Initializer interface { + Initialize() error +} + +// New instantiates a ready-to-use Cli. +func New(handlers ...Handler) *Cli { + // make the generic Cli object the first cli handler + // in order to handle `docker help` appropriately + cli := new(Cli) + cli.handlers = append([]Handler{cli}, handlers...) + return cli +} + +// initErr is an error returned upon initialization of a handler implementing Initializer. 
+type initErr struct{ error }
+
+func (err initErr) Error() string {
+	return err.error.Error()
+}
+
+func (cli *Cli) command(args ...string) (func(...string) error, error) {
+	for _, c := range cli.handlers {
+		if c == nil {
+			continue
+		}
+		camelArgs := make([]string, len(args))
+		for i, s := range args {
+			if len(s) == 0 {
+				return nil, errors.New("empty command")
+			}
+			camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:])
+		}
+		methodName := "Cmd" + strings.Join(camelArgs, "")
+		method := reflect.ValueOf(c).MethodByName(methodName)
+		if method.IsValid() {
+			if c, ok := c.(Initializer); ok {
+				if err := c.Initialize(); err != nil {
+					return nil, initErr{err}
+				}
+			}
+			return method.Interface().(func(...string) error), nil
+		}
+	}
+	return nil, errors.New("command not found")
+}
+
+// Run executes the specified command.
+func (cli *Cli) Run(args ...string) error {
+	if len(args) > 1 {
+		command, err := cli.command(args[:2]...)
+		switch err := err.(type) {
+		case nil:
+			return command(args[2:]...)
+		case initErr:
+			return err.error
+		}
+	}
+	if len(args) > 0 {
+		command, err := cli.command(args[0])
+		switch err := err.(type) {
+		case nil:
+			return command(args[1:]...)
+		case initErr:
+			return err.error
+		}
+		cli.noSuchCommand(args[0])
+	}
+	return cli.CmdHelp()
+}
+
+func (cli *Cli) noSuchCommand(command string) {
+	if cli.Stderr == nil {
+		cli.Stderr = os.Stderr
+	}
+	fmt.Fprintf(cli.Stderr, "docker: '%s' is not a docker command.\nSee 'docker --help'.\n", command)
+	os.Exit(1)
+}
+
+// CmdHelp displays information on a Docker command.
+//
+// If more than one command is specified, information is only shown for the first command.
+//
+// Usage: docker help COMMAND or docker COMMAND --help
+func (cli *Cli) CmdHelp(args ...string) error {
+	if len(args) > 1 {
+		command, err := cli.command(args[:2]...)
+		switch err := err.(type) {
+		case nil:
+			command("--help")
+			return nil
+		case initErr:
+			return err.error
+		}
+	}
+	if len(args) > 0 {
+		command, err := cli.command(args[0])
+		switch err := err.(type) {
+		case nil:
+			command("--help")
+			return nil
+		case initErr:
+			return err.error
+		}
+		cli.noSuchCommand(args[0])
+	}
+
+	if cli.Usage == nil {
+		flag.Usage()
+	} else {
+		cli.Usage()
+	}
+
+	return nil
+}
+
+// Subcmd is a subcommand of the main "docker" command.
+// A subcommand represents an action that can be performed
+// from the Docker command line client.
+//
+// To see all available subcommands, run "docker --help".
+func Subcmd(name string, synopses []string, description string, exitOnError bool) *flag.FlagSet {
+	var errorHandling flag.ErrorHandling
+	if exitOnError {
+		errorHandling = flag.ExitOnError
+	} else {
+		errorHandling = flag.ContinueOnError
+	}
+	flags := flag.NewFlagSet(name, errorHandling)
+	flags.Usage = func() {
+		flags.ShortUsage()
+		flags.PrintDefaults()
+	}
+
+	flags.ShortUsage = func() {
+		options := ""
+		if flags.FlagCountUndeprecated() > 0 {
+			options = " [OPTIONS]"
+		}
+
+		if len(synopses) == 0 {
+			synopses = []string{""}
+		}
+
+		// Allow for multiple command usage synopses.
+		for i, synopsis := range synopses {
+			lead := "\t"
+			if i == 0 {
+				// First line needs the word 'Usage'.
+				lead = "Usage:\t"
+			}
+
+			if synopsis != "" {
+				synopsis = " " + synopsis
+			}
+
+			fmt.Fprintf(flags.Out(), "\n%sdocker %s%s%s", lead, name, options, synopsis)
+		}
+
+		fmt.Fprintf(flags.Out(), "\n\n%s\n", description)
+	}
+
+	return flags
+}
+
+// A StatusError reports an unsuccessful exit by a command.
+type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/docker/cli/client.go b/vendor/github.com/docker/docker/cli/client.go new file mode 100644 index 00000000..6a82eb52 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/client.go @@ -0,0 +1,12 @@ +package cli + +import flag "github.com/docker/docker/pkg/mflag" + +// ClientFlags represents flags for the docker client. +type ClientFlags struct { + FlagSet *flag.FlagSet + Common *CommonFlags + PostParse func() + + ConfigDir string +} diff --git a/vendor/github.com/docker/docker/cli/common.go b/vendor/github.com/docker/docker/cli/common.go new file mode 100644 index 00000000..7f6a24ba --- /dev/null +++ b/vendor/github.com/docker/docker/cli/common.go @@ -0,0 +1,80 @@ +package cli + +import ( + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/tlsconfig" +) + +// CommonFlags represents flags that are common to both the client and the daemon. +type CommonFlags struct { + FlagSet *flag.FlagSet + PostParse func() + + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + TrustKey string +} + +// Command is the struct containing the command name and description +type Command struct { + Name string + Description string +} + +var dockerCommands = []Command{ + {"attach", "Attach to a running container"}, + {"build", "Build an image from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + {"cp", "Copy files/folders between a container and the local filesystem"}, + {"create", "Create a new container"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"exec", "Run a command in a running container"}, + {"export", "Export a container's filesystem as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Import the contents from a tarball to create a filesystem image"}, + {"info", "Display system-wide information"}, + {"inspect", "Return low-level information on a container or image"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive or STDIN"}, + {"login", "Log in to a Docker registry"}, + {"logout", "Log out from a Docker registry"}, + {"logs", "Fetch the logs of a container"}, + {"network", "Manage Docker networks"}, + {"pause", "Pause all processes within a container"}, + {"port", "List port mappings or a specific mapping for the CONTAINER"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from a registry"}, + {"push", "Push an image or a repository to a registry"}, + {"rename", "Rename a container"}, + {"restart", "Restart a container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save one or more images to a tar archive"}, + {"search", "Search the Docker Hub for images"}, + {"start", "Start one or more stopped containers"}, + {"stats", "Display a live stream of container(s) resource usage statistics"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Display the running processes of a container"}, + {"unpause", "Unpause all processes within a container"}, + {"update", "Update configuration of one or more containers"}, + {"version", "Show the Docker version 
information"}, + {"volume", "Manage Docker volumes"}, + {"wait", "Block until a container stops, then print its exit code"}, +} + +// DockerCommands stores all the docker command +var DockerCommands = make(map[string]Command) + +func init() { + for _, cmd := range dockerCommands { + DockerCommands[cmd.Name] = cmd + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go index bdd75871..54e0ea38 100644 --- a/vendor/github.com/docker/docker/cliconfig/config.go +++ b/vendor/github.com/docker/docker/cliconfig/config.go @@ -11,11 +11,13 @@ import ( "strings" "github.com/docker/docker/pkg/homedir" + "github.com/docker/engine-api/types" ) const ( // ConfigFileName is the name of config file ConfigFileName = "config.json" + configFileDir = ".docker" oldConfigfile = ".dockercfg" // This constant is only used for really old config files when the @@ -30,7 +32,7 @@ var ( func init() { if configDir == "" { - configDir = filepath.Join(homedir.Get(), ".docker") + configDir = filepath.Join(homedir.Get(), configFileDir) } } @@ -44,27 +46,21 @@ func SetConfigDir(dir string) { configDir = dir } -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth"` - Email string `json:"email"` - ServerAddress string `json:"serveraddress,omitempty"` -} - // ConfigFile ~/.docker/config.json file info type ConfigFile struct { - AuthConfigs map[string]AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - filename string // Note: not serialized - for internal use only + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + filename string // Note: not serialized - for internal use only } -// NewConfigFile initilizes an empty configuration file for the given filename 'fn' +// NewConfigFile initializes an empty configuration file for the given filename 'fn' func NewConfigFile(fn string) *ConfigFile { return &ConfigFile{ - AuthConfigs: make(map[string]AuthConfig), + AuthConfigs: make(map[string]types.AuthConfig), HTTPHeaders: make(map[string]string), filename: fn, } @@ -83,25 +79,20 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { if len(arr) < 2 { return fmt.Errorf("The Auth config file is empty") } - authConfig := AuthConfig{} + authConfig := types.AuthConfig{} origAuth := strings.Split(arr[0], " = ") if len(origAuth) != 2 { return fmt.Errorf("Invalid Auth config file") } - authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1]) + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) if err != nil { return err } - origEmail := strings.Split(arr[1], " = ") - if len(origEmail) != 2 { - return fmt.Errorf("Invalid Auth config file") - } - authConfig.Email = origEmail[1] authConfig.ServerAddress = defaultIndexserver configFile.AuthConfigs[defaultIndexserver] = authConfig } else { for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth) + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) 
if err != nil { return err } @@ -121,7 +112,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { } var err error for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = DecodeAuth(ac.Auth) + ac.Username, ac.Password, err = decodeAuth(ac.Auth) if err != nil { return err } @@ -132,11 +123,18 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { return nil } +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + (configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0) +} + // LegacyLoadFromReader is a convenience function that creates a ConfigFile object from // a non-nested reader func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) { configFile := ConfigFile{ - AuthConfigs: make(map[string]AuthConfig), + AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LegacyLoadFromReader(configData) return &configFile, err @@ -146,7 +144,7 @@ func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) { // a reader func LoadFromReader(configData io.Reader) (*ConfigFile, error) { configFile := ConfigFile{ - AuthConfigs: make(map[string]AuthConfig), + AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LoadFromReader(configData) return &configFile, err @@ -161,7 +159,7 @@ func Load(configDir string) (*ConfigFile, error) { } configFile := ConfigFile{ - AuthConfigs: make(map[string]AuthConfig), + AuthConfigs: make(map[string]types.AuthConfig), filename: filepath.Join(configDir, ConfigFileName), } @@ -169,15 +167,18 @@ func Load(configDir string) (*ConfigFile, error) { if _, err := os.Stat(configFile.filename); err == nil { file, err := os.Open(configFile.filename) if err != nil { - return &configFile, err + return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) } defer file.Close() err = configFile.LoadFromReader(file) + if err != nil { + err = fmt.Errorf("%s - %v", configFile.filename, err) + } return &configFile, err } else if !os.IsNotExist(err) { // if file is there but we can't stat it for any reason other // than it doesn't exist then stop - return &configFile, err + return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) } // Can't find latest config file so check for the old one @@ -187,22 +188,29 @@ func Load(configDir string) (*ConfigFile, error) { } file, err := os.Open(confFile) if err != nil { - return &configFile, err + return &configFile, fmt.Errorf("%s - %v", confFile, err) } defer file.Close() err = configFile.LegacyLoadFromReader(file) - return &configFile, err + if err != nil { + return &configFile, fmt.Errorf("%s - %v", confFile, err) + } + + if configFile.HTTPHeaders == nil { + configFile.HTTPHeaders = map[string]string{} + } + return &configFile, nil } // SaveToWriter encodes and writes out all the authorization information to // the given writer func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]AuthConfig, len(configFile.AuthConfigs)) + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) for k, authConfig := range configFile.AuthConfigs { authCopy := authConfig // encode and save the authstring, while blanking out the original fields - authCopy.Auth = EncodeAuth(&authCopy) + authCopy.Auth = encodeAuth(&authCopy) authCopy.Username = "" authCopy.Password 
= "" authCopy.ServerAddress = "" @@ -243,8 +251,12 @@ func (configFile *ConfigFile) Filename() string { return configFile.filename } -// EncodeAuth creates a base64 encoded string to containing authorization information -func EncodeAuth(authConfig *AuthConfig) string { +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + authStr := authConfig.Username + ":" + authConfig.Password msg := []byte(authStr) encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) @@ -252,8 +264,12 @@ func EncodeAuth(authConfig *AuthConfig) string { return string(encoded) } -// DecodeAuth decodes a base64 encoded string and returns username and password -func DecodeAuth(authStr string) (string, string, error) { +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + decLen := base64.StdEncoding.DecodedLen(len(authStr)) decoded := make([]byte, decLen) authByte := []byte(authStr) diff --git a/vendor/github.com/docker/docker/cliconfig/config_test.go b/vendor/github.com/docker/docker/cliconfig/config_test.go deleted file mode 100644 index dcf368c5..00000000 --- a/vendor/github.com/docker/docker/cliconfig/config_test.go +++ /dev/null @@ -1,458 +0,0 @@ -package cliconfig - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/pkg/homedir" -) - -func TestEmptyConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - SetConfigDir(tmpHome) - - config, err := Load("") - if err != nil { - t.Fatalf("Failed loading on empty config dir: %q", err) - } - - expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) - if config.Filename() != expectedConfigFilename { - t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename()) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestMissingFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestSaveFileToDirs(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - tmpHome += "/.docker" - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestEmptyFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - if err := ioutil.WriteFile(fn, []byte(""), 0600); err != nil { - t.Fatal(err) - } - - _, err = Load(tmpHome) - if err == nil { - t.Fatalf("Was supposed to fail") - } -} - -func TestEmptyJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := 
filepath.Join(tmpHome, ConfigFileName) - if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestOldInvalidsAuth(t *testing.T) { - invalids := map[string]string{ - `username = test`: "The Auth config file is empty", - `username -password -email`: "Invalid Auth config file", - `username = test -email`: "Invalid auth configuration file", - `username = am9lam9lOmhlbGxv -email`: "Invalid Auth config file", - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - for content, expectedError := range invalids { - fn := filepath.Join(tmpHome, oldConfigfile) - if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err == nil || err.Error() != expectedError { - t.Fatalf("Should have failed, got: %q, %q", config, err) - } - - } -} - -func TestOldValidAuth(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `username = am9lam9lOmhlbGxv -email = user@example.com` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatal(err) - } - - // defaultIndexserver is https://index.docker.io/v1/ - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - if !strings.Contains(configStr, "user@example.com") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -func TestOldJsonInvalid(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err == nil || err.Error() != "Invalid auth configuration file" { - t.Fatalf("Expected an error got : %v, %v", config, err) - } -} - -func TestOldJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - if err := ioutil.WriteFile(fn, []byte(js), 
0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - if !strings.Contains(configStr, "user@example.com") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -func TestNewJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - if !strings.Contains(configStr, "user@example.com") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -func TestJsonWithPsFormat(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - if !strings.Contains(configStr, `"psFormat":`) || - !strings.Contains(configStr, "{{.ID}}") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -// Save it and make sure it shows up in new form -func saveConfigAndValidateNewFormat(t *testing.T, config *ConfigFile, homeFolder string) string { - err := config.Save() - if err != nil { - t.Fatalf("Failed to save: %q", err) - } - - buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) { - t.Fatalf("Should have save in new form: %s", string(buf)) - } - return string(buf) -} - -func TestConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - if ConfigDir() == tmpHome { - t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) - } - - // Update configDir - SetConfigDir(tmpHome) - - if ConfigDir() != tmpHome { - t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) - } -} - -func TestConfigFile(t *testing.T) { - configFilename := 
"configFilename" - configFile := NewConfigFile(configFilename) - - if configFile.Filename() != configFilename { - t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename()) - } -} - -func TestJsonReaderNoFile(t *testing.T) { - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` - - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - -} - -func TestOldJsonReaderNoFile(t *testing.T) { - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - - config, err := LegacyLoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } -} - -func TestJsonWithPsFormatNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - -} - -func TestJsonSaveWithNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - err = config.SaveToWriter(f) - if err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) || - !strings.Contains(string(buf), "user@example.com") { - t.Fatalf("Should have save in new form: %s", string(buf)) - } - -} -func TestLegacyJsonSaveWithNoFile(t *testing.T) { - - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - config, err := LegacyLoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. 
File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - err = config.SaveToWriter(f) - if err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) || - !strings.Contains(string(buf), "user@example.com") { - t.Fatalf("Should have save in new form: %s", string(buf)) - } -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go new file mode 100644 index 00000000..510cf8cf --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/engine-api/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go new file mode 100644 index 00000000..3cdc8c38 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go @@ -0,0 +1,22 @@ +package credentials + +import ( + "github.com/docker/containerd/subreaper/exec" + + "github.com/docker/docker/cliconfig" +) + +// DetectDefaultStore sets the default credentials store +// if the host includes the default store helper program. 
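The Store interface introduced in credentials.go above is small enough to fake in a few lines, which is the usual way to exercise code that depends on it. A minimal in-memory sketch, assuming the vendored github.com/docker/engine-api/types package (memStore and newMemStore are illustrative names, not part of this patch):

```go
package credentials

import "github.com/docker/engine-api/types"

// memStore is a toy in-memory implementation of Store, useful for tests.
type memStore struct {
	auths map[string]types.AuthConfig
}

func newMemStore() Store {
	return &memStore{auths: make(map[string]types.AuthConfig)}
}

// Erase removes credentials for a server; a missing key is a no-op.
func (m *memStore) Erase(serverAddress string) error {
	delete(m.auths, serverAddress)
	return nil
}

// Get returns the stored credentials, or a zero AuthConfig when absent.
func (m *memStore) Get(serverAddress string) (types.AuthConfig, error) {
	return m.auths[serverAddress], nil
}

func (m *memStore) GetAll() (map[string]types.AuthConfig, error) {
	return m.auths, nil
}

func (m *memStore) Store(authConfig types.AuthConfig) error {
	m.auths[authConfig.ServerAddress] = authConfig
	return nil
}
```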
+func DetectDefaultStore(c *cliconfig.ConfigFile) { + if c.CredentialsStore != "" { + // user defined + return + } + + if defaultCredentialsStore != "" { + if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { + c.CredentialsStore = defaultCredentialsStore + } + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go new file mode 100644 index 00000000..63e8ed40 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go new file mode 100644 index 00000000..864c540f --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go new file mode 100644 index 00000000..519ef53d --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go @@ -0,0 +1,5 @@ +// +build !windows,!darwin,!linux + +package credentials + +const defaultCredentialsStore = "" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go new file mode 100644 index 00000000..8e7edd62 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go @@ -0,0 +1,67 @@ +package credentials + +import ( + "strings" + + "github.com/docker/docker/cliconfig" + "github.com/docker/engine-api/types" +) + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file *cliconfig.ConfigFile +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file *cliconfig.ConfigFile) Store { + return &fileStore{ + file: file, + } +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.AuthConfigs, serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. +func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.AuthConfigs[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range c.file.AuthConfigs { + if serverAddress == convertToHostname(registry) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.AuthConfigs, nil +} + +// Store saves the given credentials in the file store. 
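The legacy-hostname fallback in fileStore.Get above is easiest to see end to end. A hedged usage sketch, assuming cliconfig.NewConfigFile initializes the AuthConfigs map as the deleted tests suggest (registry.example.com and the /tmp path are placeholders):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/cliconfig/credentials"
	"github.com/docker/engine-api/types"
)

func main() {
	// A config file whose auths are keyed by full URL, as legacy files were.
	cfg := cliconfig.NewConfigFile("/tmp/config.json")
	cfg.AuthConfigs["https://registry.example.com/v1/"] = types.AuthConfig{
		Username: "joejoe",
		Password: "hello",
	}

	store := credentials.NewFileStore(cfg)

	// A lookup by bare hostname still succeeds via convertToHostname.
	ac, _ := store.Get("registry.example.com")
	fmt.Println(ac.Username) // joejoe
}
```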
+func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.AuthConfigs[authConfig.ServerAddress] = authConfig + return c.file.Save() +} + +func convertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.Replace(url, "http://", "", 1) + } else if strings.HasPrefix(url, "https://") { + stripped = strings.Replace(url, "https://", "", 1) + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go new file mode 100644 index 00000000..9b8997dd --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go @@ -0,0 +1,196 @@ +package credentials + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/engine-api/types" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// Standardize the not-found error, so every helper returns +// the same message and docker can handle it properly. +var errCredentialsNotFound = errors.New("credentials not found in native keychain") + +// command is an interface that remote executed commands implement. +type command interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// credentialsRequest holds information shared between docker and a remote credential store. +type credentialsRequest struct { + ServerURL string + Username string + Secret string +} + +// credentialsGetResponse is the information serialized from a remote store +// when the plugin sends requests to get the user credentials. +type credentialsGetResponse struct { + Username string + Secret string +} + +// nativeStore implements a credentials store +// using the native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + commandFn func(args ...string) command + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file *cliconfig.ConfigFile) Store { + return &nativeStore{ + commandFn: shellCommandFn(file.CredentialsStore), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := c.eraseCredentialsFromStore(serverAddress); err != nil { + return err + } + + // Fall back to the plain text store to remove the email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load the user email if it exists, or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store.
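The credentialsRequest struct above is exactly the JSON payload piped to a docker-credential-&lt;name&gt; helper's stdin on a "store" call. A standalone sketch of the wire format (the request type mirrors the unexported struct; the values are placeholders):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// request mirrors credentialsRequest: the payload a "store" invocation
// writes to the helper's stdin.
type request struct {
	ServerURL string
	Username  string
	Secret    string
}

func main() {
	buf := new(bytes.Buffer)
	json.NewEncoder(buf).Encode(request{
		ServerURL: "https://index.docker.io/v1/",
		Username:  "joejoe",
		Secret:    "hello",
	})
	fmt.Print(buf.String())
	// {"ServerURL":"https://index.docker.io/v1/","Username":"joejoe","Secret":"hello"}
}
```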
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, _ := c.fileStore.GetAll() + + for s, ac := range auths { + creds, _ := c.getCredentialsFromStore(s) + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + auths[s] = ac + } + + return auths, nil +} + +// Store saves the given credentials in the native store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fall back to the old plain-text store to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + cmd := c.commandFn("store") + creds := &credentialsRequest{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + buffer := new(bytes.Buffer) + if err := json.NewEncoder(buffer).Encode(creds); err != nil { + return err + } + cmd.Input(buffer) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + logrus.Debugf("error adding credentials - err: %v, out: `%s`", err, t) + return fmt.Errorf(t) + } + + return nil +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + cmd := c.commandFn("get") + cmd.Input(strings.NewReader(serverAddress)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + // do not return an error if the credentials are not + // in the keychain. Let docker ask for new credentials. + if t == errCredentialsNotFound.Error() { + return ret, nil + } + + logrus.Debugf("error getting credentials - err: %v, out: `%s`", err, t) + return ret, fmt.Errorf(t) + } + + var resp credentialsGetResponse + if err := json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return ret, err + } + + if resp.Username == tokenUsername { + ret.IdentityToken = resp.Secret + } else { + ret.Password = resp.Secret + ret.Username = resp.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// eraseCredentialsFromStore executes the command to remove the server credentials from the native store.
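getCredentialsFromStore above encodes the identity-token convention: a helper that holds a token rather than a password reports the reserved empty username, and the caller moves the secret into IdentityToken. A sketch of that mapping (getResponse mirrors the unexported credentialsGetResponse; the raw JSON is a hypothetical helper output):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// getResponse mirrors credentialsGetResponse, the JSON a helper
// prints in reply to a "get" invocation.
type getResponse struct {
	Username string
	Secret   string
}

func main() {
	raw := []byte(`{"Username":"","Secret":"token-abc123"}`) // hypothetical helper output
	var resp getResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	if resp.Username == "" { // tokenUsername in the patch
		fmt.Println("identity token:", resp.Secret)
	} else {
		fmt.Println("password for", resp.Username)
	}
}
```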
+func (c *nativeStore) eraseCredentialsFromStore(serverURL string) error { + cmd := c.commandFn("erase") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + logrus.Debugf("error erasing credentials - err: %v, out: `%s`", err, t) + return fmt.Errorf(t) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go b/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go new file mode 100644 index 00000000..b2afde63 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go @@ -0,0 +1,28 @@ +package credentials + +import ( + "io" + + "github.com/docker/containerd/subreaper/exec" +) + +func shellCommandFn(storeName string) func(args ...string) command { + name := remoteCredentialsPrefix + storeName + return func(args ...string) command { + return &shell{cmd: exec.Command(name, args...)} + } +} + +// shell invokes shell commands to talk with a remote credentials helper. +type shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials helper. +func (s *shell) Output() ([]byte, error) { + return s.cmd.Output() +} + +// Input sets the input to send to a remote credentials helper. +func (s *shell) Input(in io.Reader) { + s.cmd.Stdin = in +} diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go new file mode 100644 index 00000000..95b68285 --- /dev/null +++ b/vendor/github.com/docker/docker/container/archive.go @@ -0,0 +1,69 @@ +package container + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/engine-api/types" +) + +// ResolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// an error if the path points outside the container's rootfs. +func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + // Consider the given path as an absolute path in the container. + absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. + dirPath, basePath := filepath.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + + return resolvedPath, absPath, nil +} + +// StatPath stats the filesystem resource at the given resolved path. Locks +// and mounts should be acquired before calling this method, and the given +// path should be fully resolved to a path on the host corresponding to the +// given absolute path inside the container. +func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + lstat, err := os.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs.
+ hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go new file mode 100644 index 00000000..9bdd112d --- /dev/null +++ b/vendor/github.com/docker/docker/container/container.go @@ -0,0 +1,649 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +const configFileName = "config.v2.json" + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. +type CommonContainer struct { + *runconfig.StreamConfig + // embed for Container to support states directly. + *State `json:"State"` // Needed for remote api version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext +} + +// NewBaseContainer creates a new container with its +// basic configuration. 
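Several CommonContainer fields above carry `json:"-"` tags so they never reach config.v2.json; HostConfig in particular lives in its own hostconfig.json (see WriteHostConfig below), which keeps the main config portable. A toy sketch of the exclusion mechanism, with illustrative types and values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// onDisk is a toy analogue of CommonContainer's tags: a field marked
// `json:"-"` is silently dropped by encoding/json on both Marshal and
// Unmarshal, so it can be persisted elsewhere (or not at all).
type onDisk struct {
	ID         string
	Name       string
	HostConfig string `json:"-"`
}

func main() {
	b, _ := json.Marshal(onDisk{ID: "abc", Name: "/web", HostConfig: "host-only data"})
	fmt.Println(string(b)) // {"ID":"abc","Name":"/web"}
}
```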
+func NewBaseContainer(id, root string) *Container { + return &Container{ + CommonContainer: CommonContainer{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: runconfig.NewStreamConfig(), + attachContext: &attachContext{}, + }, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +// ToDisk saves the container configuration on disk. +func (container *Container) ToDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Create(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { + return err + } + + return container.WriteHostConfig() +} + +// ToDiskLocking saves the container configuration on disk in a thread safe way. +func (container *Container) ToDiskLocking() error { + container.Lock() + err := container.ToDisk() + container.Unlock() + return err +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container. +func (container *Container) WriteHostConfig() error { + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Create(pth) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.HostConfig) +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { + if container.Config.WorkingDir == "" { + return nil + } + + // If can't mount container FS at this point (eg Hyper-V Containers on + // Windows) bail out now with no action. 
+ if !container.canMountFS() { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + if container.restartManager != nil { + container.restartManager.Cancel() + } +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// StartLogger starts a new logger driver for the container. 
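GetResourcePath's safety above rests on symlink.FollowSymlinkInScope, which evaluates every symlink as though the container's BaseFS were the filesystem root, so the result cannot escape the scope. A usage sketch; the rootfs path is hypothetical, and the assumption that the function tolerates nonexistent components (resolving them lexically) is mine:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Hypothetical container rootfs; symlinks under it are followed
	// as if this directory were /.
	root := "/var/lib/docker/aufs/mnt/c/rootfs"
	p, err := symlink.FollowSymlinkInScope(root+"/etc/../etc/passwd", root)
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // /var/lib/docker/aufs/mnt/c/rootfs/etc/passwd
}
```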
+func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { + c, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("Failed to get logging factory: %v", err) + } + ctx := logger.Context{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + return c(ctx) +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + if container.HostConfig.Privileged { + return "" + } + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// Attach connects to the container's TTY, delegating to standard +// streams or websockets depending on the configuration. +func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + ctx := container.InitAttachContext() + return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) +} + +// AttachStreams connects streams to a TTY. +// Used by exec too. Should this move somewhere else? +func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + var ( + cStdout, cStderr io.ReadCloser + cStdin io.WriteCloser + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if stdin != nil && openStdin { + cStdin = streamConfig.StdinPipe() + wg.Add(1) + } + + if stdout != nil { + cStdout = streamConfig.StdoutPipe() + wg.Add(1) + } + + if stderr != nil { + cStderr = streamConfig.StderrPipe() + wg.Add(1) + } + + // Connect stdin of container to the http conn. 
+ go func() { + if stdin == nil || !openStdin { + return + } + logrus.Debugf("attach: stdin: begin") + + var err error + if tty { + _, err = copyEscapable(cStdin, stdin, keys) + } else { + _, err = io.Copy(cStdin, stdin) + + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if stdinOnce && !tty { + cStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + } + logrus.Debugf("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if stdin != nil { + stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", stdout, cStdout) + go attachStream("stderr", stderr, cStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cStdin != nil { + cStdin.Close() + } + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + // Default keys : ctrl-p ctrl-q + keys = []byte{16, 17} + } + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + for i, key := range keys { + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + if err := src.Close(); err != nil { + return 0, err + } + return 0, nil + } + nr, er = src.Read(buf) + } + // ---- End of docker + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +// ShouldRestartOnBoot decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestartOnBoot() bool { + return container.HostConfig.RestartPolicy.Name == "always" || + (container.HostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) || + (container.HostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) +} + +// AddBindMountPoint adds a new bind mount point configuration to the container. +func (container *Container) AddBindMountPoint(name, source, destination string, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: name, + Source: source, + Destination: destination, + RW: rw, + } +} + +// AddLocalMountPoint adds a new local mount point configuration to the container. 
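ShouldRestartOnBoot above reduces to a three-input truth table over the policy name, the manual-stop flag, and the exit code. A standalone restatement (shouldRestartOnBoot here is my free function, not the method in this patch):

```go
package main

import "fmt"

// shouldRestartOnBoot restates the patch's policy table with the three
// inputs made explicit.
func shouldRestartOnBoot(policy string, manuallyStopped bool, exitCode int) bool {
	return policy == "always" ||
		(policy == "unless-stopped" && !manuallyStopped) ||
		(policy == "on-failure" && exitCode != 0)
}

func main() {
	fmt.Println(shouldRestartOnBoot("always", true, 0))         // true
	fmt.Println(shouldRestartOnBoot("unless-stopped", true, 0)) // false
	fmt.Println(shouldRestartOnBoot("on-failure", false, 1))    // true
	fmt.Println(shouldRestartOnBoot("on-failure", false, 0))    // false
}
```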
+func (container *Container) AddLocalMountPoint(name, destination string, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: name, + Driver: volume.DefaultDriverName, + Destination: destination, + RW: rw, + } +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. +func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volume.DefaultCopyMode, + } +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre-created containers can still have nil values there. +// The non-recommended host configuration in the start api can +// make these fields nil again; this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// UpdateMonitor updates the monitor configuration for a running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager(false).(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns the hostname with the optional domainname appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to the container. +func (container *Container) RestartManager(reset bool) restartmanager.RestartManager { + if reset { + container.RestartCount = 0 + container.restartManager = nil + } + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy) + } + return container.restartManager +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns the existing context for attach calls to +// track container liveness.
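StopSignal above only parses the configured value when it is non-empty, then falls back to signal.DefaultStopSignal. A sketch of the same fallback, assuming the vendored github.com/docker/docker/pkg/signal package with the ParseSignal and DefaultStopSignal names this patch itself uses:

```go
package main

import (
	"fmt"
	"syscall"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	var sig syscall.Signal
	configured := "" // stand-in for container.Config.StopSignal
	if configured != "" {
		sig, _ = signal.ParseSignal(configured)
	}
	// An unset or unparsable value degrades to the default ("SIGTERM").
	if int(sig) == 0 {
		sig, _ = signal.ParseSignal(signal.DefaultStopSignal)
	}
	fmt.Println(int(sig)) // 15 (SIGTERM) on Linux
}
```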
+func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancel attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go new file mode 100644 index 00000000..754090f9 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_unix.go @@ -0,0 +1,405 @@ +// +build linux freebsd + +package container + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +// DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container +const DefaultSHMSize int64 = 67108864 + +// Container holds the fields specific to unixen implementations. +// See CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +// CreateDaemonEnvironment returns the list of all environment variables given the list of +// environment variables related to links. +// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. +// The defaults set here do not override the values in container.Config.Env +func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { + // Setup environment + env := []string{ + "PATH=" + system.DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + if container.Config.Tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. 
+ env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// appendNetworkMounts appends any network mounts to the array of mount points passed in +func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { + for _, mnt := range container.NetworkMounts() { + dest, err := container.GetResourcePath(mnt.Destination) + if err != nil { + return nil, err + } + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest}) + } + return volumeMounts, nil +} + +// NetworkMounts returns the list of network mounts. +func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + label.Relabel(container.HostsPath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + return mounts +} + +// CopyImagePathContent copies 
files in destination to the volume. +func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + path, err := v.Mount() + if err != nil { + return err + } + defer v.Unmount() + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: volume.DefaultPropagationMode, + }) + } + + return mounts +} + +// UpdateContainer updates configuration of a container. 
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving updated container: %v", err) + return err + } + + return nil +} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { + var ( + volumeMounts []volume.MountPoint + err error + ) + + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + return err + } + + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume}) + } + + // Append any network mounts to the list (this is a no-op on Windows) + if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil { + return err + } + + for _, volumeMount := range volumeMounts { + if forceSyscall { + if err := detachMounted(volumeMount.Destination); err != nil { + logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err) + } + } + + if volumeMount.Volume != nil { + if err := volumeMount.Volume.Unmount(); err != nil { + return err + } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + } + + return nil +} + +// copyExistingContents copies from the source to the destination and +// ensures the ownership is appropriately set. 
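UpdateContainer above merges the requested resources field by field with a "zero means keep the current setting" rule. A standalone sketch of that pattern (resources and merge are illustrative, not the engine-api types):

```go
package main

import "fmt"

// resources is a cut-down stand-in for containertypes.Resources.
type resources struct {
	CPUShares int64
	Memory    int64
}

// merge applies the patch's rule: only non-zero fields in the update
// override the current values.
func merge(current, update resources) resources {
	if update.CPUShares != 0 {
		current.CPUShares = update.CPUShares
	}
	if update.Memory != 0 {
		current.Memory = update.Memory
	}
	return current
}

func main() {
	cur := resources{CPUShares: 512, Memory: 1 << 30}
	got := merge(cur, resources{Memory: 2 << 30}) // CPUShares omitted
	fmt.Printf("%+v\n", got)                      // {CPUShares:512 Memory:2147483648}
}
```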
+func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the destination volume is empty, copy files from the source into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + return copyOwnership(source, destination) +} + +// copyOwnership copies the permissions and uid:gid of the source file +// to the destination file +func copyOwnership(source, destination string) error { + stat, err := system.Stat(source) + if err != nil { + return err + } + + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode())) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() []Mount { + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + return mounts +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// canMountFS determines if the file system for the container +// can be mounted locally. A no-op on non-Windows platforms +func (container *Container) canMountFS() bool { + return true +} diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go new file mode 100644 index 00000000..c80c2aa0 --- /dev/null +++ b/vendor/github.com/docker/docker/container/history.go @@ -0,0 +1,30 @@ +package container + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descending order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// sort orders the history by creation date in descending order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go new file mode 100644 index 00000000..9fa1165d --- /dev/null +++ b/vendor/github.com/docker/docker/container/memory_store.go @@ -0,0 +1,92 @@ +package container + +import "sync" + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overwrites any existing container with the same id. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id.
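History above implements sort.Interface with a reversed Less, so a plain sort.Sort yields newest-first order. A standalone analogue (item and history are my stand-ins for *Container and History):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

type item struct{ created time.Time }

// history sorts newest-first because Less compares j's creation time
// against i's, the same trick as the patch's History type.
type history []item

func (h history) Len() int           { return len(h) }
func (h history) Less(i, j int) bool { return h[j].created.Before(h[i].created) }
func (h history) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func main() {
	now := time.Now()
	h := history{{now.Add(-time.Hour)}, {now}, {now.Add(-time.Minute)}}
	sort.Sort(h)
	fmt.Println(h[0].created.Equal(now)) // true: newest first
}
```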
+// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + c.RLock() + res := c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := History(c.all()) + containers.sort() + return containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asynchronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. +func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go new file mode 100644 index 00000000..ba82d875 --- /dev/null +++ b/vendor/github.com/docker/docker/container/monitor.go @@ -0,0 +1,60 @@ +package container + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// supervisor defines the interface that a supervisor must implement +type supervisor interface { + // LogContainerEvent generates events related to a given container + LogContainerEvent(*Container, string) + // Cleanup ensures that the container is properly unmounted + Cleanup(*Container) + // StartLogging starts the logging driver for the container + StartLogging(*Container) error + // Run starts a container + Run(c *Container) error + // IsShuttingDown tells whether the supervisor is shutting down or not + IsShuttingDown() bool +}
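ApplyAll runs the reducer in one goroutine per container, so shared state inside the reducer needs its own synchronization, and the store must not be mutated from within it. A hedged sketch of a read-only reducer:

package sketch

import (
	"sync/atomic"

	"github.com/docker/docker/container"
)

// countRunning counts running containers. The atomic increment matters:
// ApplyAll calls the reducer from a separate goroutine per container.
func countRunning(store container.Store) int32 {
	var running int32
	store.ApplyAll(func(c *container.Container) {
		if c.IsRunning() {
			atomic.AddInt32(&running, 1)
		}
	})
	return running // safe to read: ApplyAll waits for all goroutines
}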
+// Reset puts a container into a state where it can be restarted again. +func (container *Container) Reset(lock bool) { + if lock { + container.Lock() + defer container.Unlock() + } + + if err := container.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", container.ID, err) + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.NewInputPipes() + } + + if container.LogDriver != nil { + if container.LogCopier != nil { + exit := make(chan struct{}) + go func() { + container.LogCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warnf("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.LogDriver.Close() + container.LogCopier = nil + container.LogDriver = nil + } +} diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go new file mode 100644 index 00000000..c52abed2 --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package container + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Data string `json:"data"` + Propagation string `json:"mountpropagation"` +} diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go new file mode 100644 index 00000000..a12a193e --- /dev/null +++ b/vendor/github.com/docker/docker/container/state.go @@ -0,0 +1,283 @@ +package container + +import ( + "fmt" + "sync" + "time" + + "github.com/docker/go-units" +) + +// State holds the current container state, and has methods to get and +// set the state. State is embedded in Container, which allows all of the +// functions defined against State to run against Container. +type State struct { + sync.Mutex + // FIXME: Why do we have both paused and running if a + // container cannot be paused and running at the same time? + Running bool + Paused bool + Restarting bool + OOMKilled bool + RemovalInProgress bool // No need for this to be persisted on disk. + Dead bool + Pid int + ExitCode int + Error string // contains last known error when starting the container + StartedAt time.Time + FinishedAt time.Time + waitChan chan struct{} +}
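The waitChan field implements a broadcast: closing the channel releases every waiter at once, and a fresh channel is installed for the next transition. The same pattern in isolation, with illustrative names that are not from the source:

package sketch

import "sync"

// notifier reproduces the close-and-replace broadcast used by State.waitChan.
type notifier struct {
	mu sync.Mutex
	ch chan struct{}
}

func newNotifier() *notifier {
	return &notifier{ch: make(chan struct{})}
}

// wait blocks until the next broadcast.
func (n *notifier) wait() {
	n.mu.Lock()
	ch := n.ch // snapshot under the lock, as State does
	n.mu.Unlock()
	<-ch
}

// broadcast wakes every pending waiter and resets for the next round.
func (n *notifier) broadcast() {
	n.mu.Lock()
	close(n.ch) // fire all current waiters
	n.ch = make(chan struct{})
	n.mu.Unlock()
}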
+// NewState creates a default state object with a fresh channel for state changes. +func NewState() *State { + return &State{ + waitChan: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// IsValidStateString checks if the provided string is a valid container state or not. +func IsValidStateString(s string) bool { + if s != "paused" && + s != "restarting" && + s != "running" && + s != "dead" && + s != "created" && + s != "exited" { + return false + } + return true +} + +func wait(waitChan <-chan struct{}, timeout time.Duration) error { + if timeout < 0 { + <-waitChan + return nil + } + select { + case <-time.After(timeout): + return fmt.Errorf("Timed out: %v", timeout) + case <-waitChan: + return nil + } +} + +// WaitRunning waits until the state is running. If the state is already +// running it returns immediately. To wait forever, supply a negative +// timeout. Returns the pid that was passed to SetRunning. +func (s *State) WaitRunning(timeout time.Duration) (int, error) { + s.Lock() + if s.Running { + pid := s.Pid + s.Unlock() + return pid, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.GetPID(), nil +} + +// WaitStop waits until the state is stopped. If the state is already stopped it returns +// immediately. To wait forever, supply a negative timeout. +// Returns the exit code that was passed to SetStoppedLocking. +func (s *State) WaitStop(timeout time.Duration) (int, error) { + s.Lock() + if !s.Running { + exitCode := s.ExitCode + s.Unlock() + return exitCode, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.getExitCode(), nil +} + +// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +// GetPID returns the process id of the container. +func (s *State) GetPID() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +func (s *State) getExitCode() int { + s.Lock() + res := s.ExitCode + s.Unlock() + return res +}
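From the caller's side, the timeout convention is the main subtlety: any negative duration blocks forever. A small sketch, assuming s is the *container.State of a container managed elsewhere:

package sketch

import (
	"log"
	"time"

	"github.com/docker/docker/container"
)

func waitSketch(s *container.State) {
	// A negative timeout means wait forever for the running transition.
	pid, err := s.WaitRunning(-1 * time.Second)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("running as pid %d", pid)

	// Give it ten seconds to stop; err is non-nil if the timeout fires first.
	if exitCode, err := s.WaitStop(10 * time.Second); err != nil {
		log.Printf("still running: %v", err)
	} else {
		log.Printf("exited with code %d", exitCode)
	}
}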
+// SetRunning sets the state of the container to "running". +func (s *State) SetRunning(pid int, initial bool) { + s.Error = "" + s.Running = true + s.Paused = false + s.Restarting = false + s.ExitCode = 0 + s.Pid = pid + if initial { + s.StartedAt = time.Now().UTC() + } + close(s.waitChan) // fire waiters for start + s.waitChan = make(chan struct{}) +} + +// SetStoppedLocking locks the container state and sets it to "stopped". +func (s *State) SetStoppedLocking(exitStatus *ExitStatus) { + s.Lock() + s.SetStopped(exitStatus) + s.Unlock() +} + +// SetStopped sets the container state to "stopped" without locking. +func (s *State) SetStopped(exitStatus *ExitStatus) { + s.Running = false + s.Paused = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetRestartingLocking locks the state and sets it to "restarting". It is used when docker +// handles the auto restart of containers that are in the middle of a stop and being restarted again +func (s *State) SetRestartingLocking(exitStatus *ExitStatus) { + s.Lock() + s.SetRestarting(exitStatus) + s.Unlock() +} + +// SetRestarting sets the container state to "restarting". +// It also sets the container PID to 0. +func (s *State) SetRestarting(exitStatus *ExitStatus) { + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetError sets the container's error state. This is useful when we want to +// know the error that occurred when the container transitions to another state +// when inspecting it +func (s *State) SetError(err error) { + s.Error = err.Error() +} + +// IsPaused returns whether the container is paused or not. +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +// IsRestarting returns whether the container is restarting or not. +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +// SetRemovalInProgress sets the container state as being removed. +// It returns true if the container was already in that state. +func (s *State) SetRemovalInProgress() bool { + s.Lock() + defer s.Unlock() + if s.RemovalInProgress { + return true + } + s.RemovalInProgress = true + return false +} + +// ResetRemovalInProgress sets the RemovalInProgress state back to false. +func (s *State) ResetRemovalInProgress() { + s.Lock() + s.RemovalInProgress = false + s.Unlock() +} + +// SetDead sets the container state to "dead" +func (s *State) SetDead() { + s.Lock() + s.Dead = true + s.Unlock() +}
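SetRemovalInProgress acts as a test-and-set guard. A sketch of the calling pattern one would expect around deletion; the surrounding teardown steps are hypothetical:

package sketch

import (
	"fmt"

	"github.com/docker/docker/container"
)

// removeSketch shows the guard around deletion; State is embedded in
// Container, so the methods are callable on the container directly.
func removeSketch(c *container.Container) error {
	// SetRemovalInProgress returns true if another removal already won.
	if c.SetRemovalInProgress() {
		return fmt.Errorf("removal of %s is already in progress", c.ID)
	}
	defer c.ResetRemovalInProgress()

	// ... unmount volumes, release layers, delete on-disk state ...
	return nil
}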
diff --git a/vendor/github.com/docker/docker/container/state_unix.go b/vendor/github.com/docker/docker/container/state_unix.go new file mode 100644 index 00000000..8d25a237 --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_unix.go @@ -0,0 +1,10 @@ +// +build linux freebsd + +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCode = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled +} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go new file mode 100644 index 00000000..042fb1a3 --- /dev/null +++ b/vendor/github.com/docker/docker/container/store.go @@ -0,0 +1,28 @@ +package container + +// StoreFilter defines a function to filter +// containers in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store. +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default.go b/vendor/github.com/docker/docker/daemon/apparmor_default.go new file mode 100644 index 00000000..e4065b4a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default.go @@ -0,0 +1,30 @@ +// +build linux + +package daemon + +import ( + "github.com/Sirupsen/logrus" + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func installDefaultAppArmorProfile() { + if apparmor.IsEnabled() { + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + apparmorProfiles := []string{defaultApparmorProfile} + + // Allow the daemon to run if loading failed, but the profiles may already be + // active (loaded by another run, manually, or via system startup) + for _, policy := range apparmorProfiles { + if err := aaprofile.IsLoaded(policy); err != nil { + logrus.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy) + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go new file mode 100644 index 00000000..f186a68a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package daemon + +func installDefaultAppArmorProfile() { +} diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go new file mode 100644 index 00000000..fb023689 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive.go @@ -0,0 +1,432 @@ +package daemon + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/engine-api/types" +)
+// ErrExtractPointNotDirectory is used to convey that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. +var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ErrRootFSReadOnly is returned when a write operation targets a container +// whose rootfs is marked read-only. +var ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. +func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. +func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.UnmountVolumes(true, daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource.
+func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true, daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.UnmountVolumes(true, daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.UnmountVolumes(true, daemon.LogVolumeEvent) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. 
+ resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + uid, gid := daemon.GetRemappedUIDGID() + options := &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &archive.TarChownOptions{ + UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? + }, + } + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true, daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.UnmountVolumes(true, daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} + +// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container +// specified by a container object. +// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). +// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. +func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { + srcPath := src.Path() + destExists := true + destDir := false + rootUID, rootGID := daemon.GetRemappedUIDGID() + + // Work in daemon-local OS specific file paths + destPath = filepath.FromSlash(destPath) + + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(c) + if err != nil { + return err + } + defer daemon.Unmount(c) + + dest, err := c.GetResourcePath(destPath) + if err != nil { + return err + } + + // Preserve the trailing slash + // TODO: why are we appending another path separator if there was already one? + if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { + destDir = true + dest += string(os.PathSeparator) + } + + destPath = dest + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + + if src.IsDir() { + // copy as directory + if err := archiver.CopyWithTar(srcPath, destPath); err != nil { + return err + } + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) + } + if decompress && archive.IsArchivePath(srcPath) { + // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. 
First we need to strip off the archive's + // filename from the path but this is only added if it does not end in slash + tarDest := destPath + if strings.HasSuffix(tarDest, string(os.PathSeparator)) { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + err := archiver.UntarPath(srcPath, tarDest) + /* + if err != nil { + logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) + } + */ + return err + } + + // only needed for fixPermissions, but might as well put it before CopyFileWithTar + if destDir || (destExists && destStat.IsDir()) { + destPath = filepath.Join(destPath, src.Name()) + } + + if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { + return err + } + if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { + return err + } + + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) +} diff --git a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go new file mode 100644 index 00000000..fcea13c9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -0,0 +1,57 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" + "os" + "path/filepath" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. 
+ cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go new file mode 100644 index 00000000..79e9cd51 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/attach.go @@ -0,0 +1,120 @@ +package daemon + +import ( + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/stdcopy" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. +func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) + return errors.NewRequestConflictError(err) + } + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + var stdin io.ReadCloser + var stdout, stderr io.Writer + + if c.UseStdin { + stdin = inStream + } + if c.UseStdout { + stdout = outStream + } + if c.UseStderr { + stderr = errStream + } + + if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, c.DetachKeys); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) +} + +func (daemon *Daemon) containerAttach(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { + if logs { + logDriver, err := daemon.getLogger(container) + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(container, "attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + <-container.Attach(stdinPipe, stdout, stderr, keys) + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.WaitStop(-1 * time.Second) + } + } + return nil +} 
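When Tty is false the daemon multiplexes stdout and stderr onto one stream via stdcopy.NewStdWriter, so a reader has to demultiplex with the matching stdcopy.StdCopy. A minimal client-side sketch:

package sketch

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

// demux splits a non-TTY attach stream back into stdout and stderr.
// With Tty enabled the stream is raw and should simply be copied instead.
func demux(attachStream io.Reader) error {
	_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, attachStream)
	return err
}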
diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go new file mode 100644 index 00000000..c99485f5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go @@ -0,0 +1,131 @@ +// +build !windows + +package caps + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: "CAP_" + strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + // CapabilityMapping maps linux capability name to its value of capability.Cap type + // Capabilities is one of the security systems in Linux Security Module (LSM) + // framework provided by the kernel. + // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + // Capabilities contains all CapabilityMapping + Capabilities []*CapabilityMapping +) + +// String returns the key of the CapabilityMapping +func (c *CapabilityMapping) String() string { + return c.Key +} + +// GetCapability returns CapabilityMapping which contains specific key +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +// GetAllCapabilities returns all of the capabilities +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// TweakCapabilities can tweak capabilities by adding or dropping capabilities +// based on the basic capabilities. +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix + // Currently they are mixed in here. We should do conversion in one place.
+ + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + + if !stringutils.InSlice(allCaps, "CAP_"+cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if stringutils.InSlice(adds, "all") { + basics = allCaps + } + + if !stringutils.InSlice(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !stringutils.InSlice(drops, cap[4:]) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + cap = "CAP_" + cap + + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !stringutils.InSlice(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + return newCaps, nil +}
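A worked example of the semantics above; the basics list here is illustrative rather than the daemon's real default set. Note that adds and drops are given without the CAP_ prefix, while the result carries it:

package sketch

import "github.com/docker/docker/daemon/caps"

// tweakSketch drops KILL and adds SYS_ADMIN relative to a hypothetical
// basics list. Result: CAP_CHOWN, CAP_NET_BIND_SERVICE, CAP_SYS_ADMIN.
func tweakSketch() ([]string, error) {
	basics := []string{"CAP_CHOWN", "CAP_KILL", "CAP_NET_BIND_SERVICE"}
	return caps.TweakCapabilities(basics, []string{"SYS_ADMIN"}, []string{"KILL"})
}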
diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go new file mode 100644 index 00000000..5bc5b9d5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/changes.go @@ -0,0 +1,15 @@ +package daemon + +import "github.com/docker/docker/pkg/archive" + +// ContainerChanges returns a list of container fs changes +func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + return daemon.changes(container) +} diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go new file mode 100644 index 00000000..7cdf80c7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/commit.go @@ -0,0 +1,233 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "runtime" + "strings" + "time" + + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" +) + +// merge merges two Config, the image container configuration (default values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. +func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + if imageConf.Labels != nil { + for l := range userConf.Labels { + imageConf.Labels[l] = userConf.Labels[l] + } + userConf.Labels = imageConf.Labels + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository.
+func (daemon *Daemon) Commit(name string, c *types.ContainerCommitConfig) (string, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows + if runtime.GOOS == "windows" && container.IsRunning() { + return "", fmt.Errorf("Windows does not support commit of a running container") + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + if c.MergeConfigs { + if err := merge(c.Config, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var history []image.History + rootFS := image.NewRootFS() + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(container.Config.Cmd, " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: c.Config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *container.Config, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + if c.Repo != "" { + newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImage(newTag, id.String()); err != nil { + return "", err + } + } + + attributes := map[string]string{ + "comment": c.Comment, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/vendor/github.com/docker/docker/daemon/config.go b/vendor/github.com/docker/docker/daemon/config.go new file mode 100644 index 00000000..c293935c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + 
"github.com/docker/docker/registry" + "github.com/imdario/mergo" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which are +// common across platforms. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type CommonConfig struct { + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + ExecRoot string `json:"exec-root,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + Root string `json:"graph,omitempty"` + SocketGroup string `json:"group,omitempty"` + TrustKeyPath string `json:"-"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + LogConfig + bridgeConfig // bridgeConfig holds bridge network specific configuration. 
+ registry.ServiceOptions + + reloadLock sync.Mutex + valuesSet map[string]interface{} +} + +// InstallCommonFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) { + config.ServiceOptions.InstallCliFlags(cmd, usageFn) + + cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options")) + cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last")) + cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set runtime execution options")) + cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file")) + cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime")) + cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root directory for execution state files")) + cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) + cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) + cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) + cmd.BoolVar(&config.RawLogs, []string{"-raw-logs"}, false, usageFn("Full timestamps without ANSI coloring")) + // FIXME: why the inconsistency between "hosts" and "sockets"? + cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) + cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use")) + cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) + cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) + cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) + cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options")) + cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise")) + cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store")) + cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (config *Config) IsValueSet(name string) bool { + if config.valuesSet == nil { + return false + } + _, ok := config.valuesSet[name] + return ok +} + +// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. 
+func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) error { + logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + newConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return err + } + + if err := validateConfiguration(newConfig); err != nil { + return fmt.Errorf("file configuration validation failed (%v)", err) + } + + reload(newConfig) + return nil +} + +// boolValue is an interface that boolean value flags implement +// to tell the command line how to make -name equivalent to -name=true. +type boolValue interface { + IsBoolFlag() bool +} + +// MergeDaemonConfigurations reads a configuration file, +// loads the file configuration in an isolated structure, +// and merges the configuration provided from flags on top +// if there are no conflicts. +func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := validateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. +func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. + namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup("-" + key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // set also default for mergeVal flags that are boolValue at the same time. + flags.VisitAll(func(f *flag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.valuesSet = configSet + } + + reader = bytes.NewReader(b) + err = json.NewDecoder(reader).Decode(&config) + return &config, err +} + +// configValuesSet returns the configuration values explicitly set in the file.
+func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + flagName := "-" + key + if flag := flags.Lookup(flagName); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *flag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. + duplicatedConflicts := func(f *flag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload without trailing dashes + for _, name := range f.Names { + name = strings.TrimLeft(name, "-") + + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// validateConfiguration validates some specific configs. 
+// such as config.DNS, config.Labels, config.DNSSearch +func validateConfiguration(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_experimental.go b/vendor/github.com/docker/docker/daemon/config_experimental.go new file mode 100644 index 00000000..ceb7c382 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_experimental.go @@ -0,0 +1,8 @@ +// +build experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_stub.go b/vendor/github.com/docker/docker/daemon/config_stub.go new file mode 100644 index 00000000..796e6b6e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_stub.go @@ -0,0 +1,8 @@ +// +build !experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_unix.go b/vendor/github.com/docker/docker/daemon/config_unix.go new file mode 100644 index 00000000..5394949e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_unix.go @@ -0,0 +1,88 @@ +// +build linux freebsd + +package daemon + +import ( + "net" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // Fields below here are platform specific. + + CorsHeaders string `json:"api-cors-headers,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + CgroupParent string `json:"cgroup-parent,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` +} + +// bridgeConfig stores all the bridge driver specific +// configuration. 
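Since these json-tagged structs double as the schema of the daemon's JSON config file, hyphenated file keys map straight onto struct fields. A short sketch with a cut-down stand-in struct (not the real Config), reusing tags that appear above:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type toyConfig struct {
        Labels       []string `json:"labels,omitempty"`
        EnableCors   bool     `json:"api-enable-cors,omitempty"`
        RemappedRoot string   `json:"userns-remap,omitempty"`
    }

    func main() {
        data := []byte(`{"labels": ["env=prod"], "api-enable-cors": true}`)
        var cfg toyConfig
        if err := json.Unmarshal(data, &cfg); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cfg) // {Labels:[env=prod] EnableCors:true RemappedRoot:}
    }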
+type bridgeConfig struct { + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-mask,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + DefaultIP net.IP `json:"ip,omitempty"` + Iface string `json:"bridge,omitempty"` + IP string `json:"bip,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(cmd, usageFn) + + // Then platform-specific install flags + cmd.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, usageFn("Enable selinux support")) + cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) + config.Ulimits = make(map[string]*units.Ulimit) + cmd.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Set default ulimits for containers")) + cmd.BoolVar(&config.bridgeConfig.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) + cmd.BoolVar(&config.bridgeConfig.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) + cmd.BoolVar(&config.bridgeConfig.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) + cmd.BoolVar(&config.bridgeConfig.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) + cmd.StringVar(&config.bridgeConfig.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) + cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) + cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) + cmd.StringVar(&config.bridgeConfig.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) + cmd.BoolVar(&config.bridgeConfig.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) + cmd.BoolVar(&config.bridgeConfig.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) + cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) + cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) + 
cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers")) + cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces")) + cmd.StringVar(&config.ContainerdAddr, []string{"-containerd"}, "", usageFn("Path to containerd socket")) + + config.attachExperimentalFlags(cmd, usageFn) +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/vendor/github.com/docker/docker/daemon/container_operations_unix.go new file mode 100644 index 00000000..e4ed08c4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_unix.go @@ -0,0 +1,319 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/specs/specs-go" +) + +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + for _, envVar := range link.ToEnv() { + env = append(env, envVar) + } + } + + return env, nil +} + +// getSize returns the real size & virtual size of the container. +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + if err := daemon.Mount(container); err != nil { + logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer daemon.Unmount(container) + + sizeRw, err = container.RWLayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(), container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. 
+ sizeRw = -1 + } + + if parent := container.RWLayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + shmSize := container.DefaultSHMSize + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { + return err + } + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func specDevice(d *configs.Device) specs.Device { + return specs.Device{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func specDeviceCgroup(d *configs.Device) specs.DeviceCgroup { + t := string(d.Type) + return specs.DeviceCgroup{ + Allow: true, + Type: 
&t, + Major: &d.Major, + Minor: &d.Minor, + Access: &d.Permissions, + } +} + +func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + resolvedPathOnHost := deviceMapping.PathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(deviceMapping.PathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := os.Readlink(deviceMapping.PathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, deviceMapping.CgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = deviceMapping.PathInContainer + return append(devs, specDevice(device)), append(devPermissions, specDeviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1) + devs = append(devs, specDevice(childDevice)) + devPermissions = append(devPermissions, specDeviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) +} + +func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device { + if len(userDevices) == 0 { + return defaultDevices + } + + paths := map[string]*configs.Device{} + for _, d := range userDevices { + paths[d.Path] = d + } + + var devs []*configs.Device + for _, d := range defaultDevices { + if _, defined := paths[d.Path]; !defined { + devs = append(devs, d) + } + } + return append(devs, userDevices...) 
+} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go new file mode 100644 index 00000000..34f0aa2d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create.go @@ -0,0 +1,185 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + volumestore "github.com/docker/docker/volume/store" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" + "github.com/opencontainers/runc/libcontainer/label" +) + +// ContainerCreate creates a container. +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) { + if params.Config == nil { + return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return types.ContainerCreateResponse{}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, err + } + + container, err := daemon.create(params) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + + return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + imgID = img.ID() + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("Clean up Error! 
Cannot destroy container %s: %v", container.ID, err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + if err := daemon.Register(container); err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.removeMountPoints(container, true); err != nil { + logrus.Error(err) + } + } + }() + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + + if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { + return nil, err + } + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, err + } + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode) ([]string, error) { + if ipcMode.IsHost() || pidMode.IsHost() { + return label.DisableSecOpt(), nil + } + if ipcContainer := ipcMode.Container(); ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + + return label.DupSecOpt(c.ProcessLabel), nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.setupInitLayer) + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the remote API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + if volumestore.IsNameConflict(err) { + return nil, fmt.Errorf("A volume named %s already exists. 
Choose a different volume name.", name) + } + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + return volumeToAPIType(v), nil +} diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go new file mode 100644 index 00000000..37c4a911 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create_unix.go @@ -0,0 +1,76 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + + for spec := range config.Volumes { + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. +func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if !mnt.CopyData || mnt.Volume == nil { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go new file mode 100644 index 00000000..f12034c6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon.go @@ -0,0 +1,1538 @@ +// Package daemon exposes the functions that occur on the host server +// that the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior. 
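populateVolumes above seeds only non-bind mounts from the image's rootfs, and only at container creation. A toy sketch of that copy-on-first-create rule; the mountPoint type and seed callback are stand-ins, not the real volume APIs:

    package main

    import "fmt"

    type mountPoint struct {
        Name        string
        Destination string
        CopyData    bool
    }

    func populate(mounts []mountPoint, seed func(dest string) error) error {
        for _, m := range mounts {
            if !m.CopyData { // bind mounts keep whatever the host already has
                continue
            }
            if err := seed(m.Destination); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        mounts := []mountPoint{
            {Name: "data", Destination: "/var/lib/app", CopyData: true},
            {Name: "bind", Destination: "/etc/app.conf", CopyData: false},
        }
        populate(mounts, func(dest string) error {
            fmt.Println("seeding volume from image path", dest)
            return nil
        })
    }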
+package daemon + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/errors" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + eventtypes "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + networktypes "github.com/docker/engine-api/types/network" + registrytypes "github.com/docker/engine-api/types/registry" + "github.com/docker/engine-api/types/strslice" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/distribution" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/tarexport" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +const ( + // maxDownloadConcurrency is the maximum number of downloads that + // may take place at a time for each pull. + maxDownloadConcurrency = 3 + // maxUploadConcurrency is the maximum number of uploads that + // may take place at a time for each push. + maxUploadConcurrency = 5 +) + +var ( + validContainerNameChars = utils.RestrictedNameChars + validContainerNamePattern = utils.RestrictedNamePattern + + errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + +// Daemon holds information about the Docker daemon. 
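ErrImageDoesNotExist above is a typed error, so callers can branch on a missing image instead of string-matching. A minimal sketch of that pattern; the lookup function is a stand-in, and the type is re-declared here only to keep the example self-contained:

    package main

    import "fmt"

    type ErrImageDoesNotExist struct{ RefOrID string }

    func (e ErrImageDoesNotExist) Error() string {
        return fmt.Sprintf("no such id: %s", e.RefOrID)
    }

    func lookup(refOrID string) error {
        return ErrImageDoesNotExist{refOrID} // always misses in this sketch
    }

    func main() {
        if err := lookup("busybox:missing"); err != nil {
            if _, ok := err.(ErrImageDoesNotExist); ok {
                fmt.Println("image must be pulled first:", err)
            }
        }
    }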
+type Daemon struct { + ID string + repository string + containers container.Store + execCommands *exec.Store + referenceStore reference.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + distributionMetadataStore dmetadata.Store + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *Config + statsCollector *statsCollector + defaultLogConfig containertypes.LogConfig + RegistryService *registry.Service + EventsService *events.Events + volumes *store.VolumeStore + root string + seccompEnabled bool + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + layerStore layer.Store + imageStore image.Store + nameIndex *registrar.Registrar + linkIndex *linkIndex + containerd libcontainerd.Client + defaultIsolation containertypes.Isolation // Default isolation mode on Windows +} + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exactly match a container in daemon's list +// - A container name, which will only exactly match via the GetByName() function +// - A partial container ID prefix (e.g. short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { + if len(prefixOrName) == 0 { + return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + } + + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerID, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + err := fmt.Errorf("No such container: %s", prefixOrName) + return nil, errors.NewRequestNotFoundError(err) + } + return nil, indexError + } + return daemon.containers.Get(containerID), nil +} + +// Exists returns true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.GetContainer(id) + return c != nil +} + +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.GetContainer(id) + return c.State.IsPaused() +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup.
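The third lookup step in GetContainer resolves unique short-ID prefixes via the truncindex package. A naive map-scan sketch of the same idea, assuming hex container IDs; the real index is a trie and much faster:

    package main

    import (
        "fmt"
        "strings"
    )

    func resolvePrefix(ids []string, prefix string) (string, error) {
        var match string
        for _, id := range ids {
            if strings.HasPrefix(id, prefix) {
                if match != "" {
                    return "", fmt.Errorf("prefix %q matches more than one container", prefix)
                }
                match = id
            }
        }
        if match == "" {
            return "", fmt.Errorf("No such container: %s", prefix)
        }
        return match, nil
    }

    func main() {
        ids := []string{"4e3b5f7a9c1d", "4f22aa01bde9"}
        id, err := resolvePrefix(ids, "4e")
        fmt.Println(id, err) // 4e3b5f7a9c1d <nil>
    }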
+func (daemon *Daemon) load(id string) (*container.Container, error) { + container := daemon.newBaseContainer(id) + + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving container name to disk: %v", err) + } + } + return daemon.nameIndex.Reserve(container.Name, container.ID) +} + +// Register makes a container object usable by the daemon as <container.ID> +func (daemon *Daemon) Register(c *container.Container) error { + // Attach to stdout and stderr + if c.Config.OpenStdin { + c.NewInputPipes() + } else { + c.NewNopInputPipe() + } + + daemon.containers.Add(c.ID, c) + daemon.idIndex.Add(c.ID) + + return nil +} + +func (daemon *Daemon) restore() error { + var ( + debug = utils.IsDebugEnabled() + currentDriver = daemon.GraphDriverName() + containers = make(map[string]*container.Container) + ) + + if !debug { + logrus.Info("Loading containers: start.") + } + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if !debug && logrus.GetLevel() == logrus.InfoLevel { + fmt.Print(".") + } + if err != nil { + logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + restartContainers := make(map[*container.Container]chan struct{}) + for _, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + continue + } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + continue + } + } + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + if c.IsRunning() || c.IsPaused() { + // Fix activityCount such that graph mounts can be unmounted later + if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil { + logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err) + return + } + if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(c.RestartManager(true))); err != nil { + logrus.Errorf("Failed to restore with containerd: %q", err) + return + } + } + // fixme: only if not running + // get list of containers we need to restart + if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestartOnBoot() { +
mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } + }(c) + } + wg.Wait() + + // Now that all the containers are registered, register the links + for _, c := range containers { + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + + // Make sure networks are available before starting + if err := daemon.containerStart(c); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume driver is not available. + if _, ok := restartContainers[c]; ok { + continue + } + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + if !debug { + if logrus.GetLevel() == logrus.InfoLevel { + fmt.Println() + } + logrus.Info("Loading containers: done.") + } + + return nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return fmt.Errorf("No command specified") + } + return nil +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(name) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + id, err := daemon.nameIndex.Get(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. 
The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.nameIndex.Release(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.nameIndex.Reserve(name, id); err != nil { + return "", err + } + return name, nil +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { + if len(configEntrypoint) != 0 { + return configEntrypoint[0], append(configEntrypoint[1:], configCmd...) + } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) { + var ( + id string + err error + noExplicitName = name == "" + ) + id, name, err = daemon.generateIDAndName(name) + if err != nil { + return nil, err + } + + daemon.generateHostname(id, config) + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = config + base.HostConfig = &containertypes.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} + base.Name = name + base.Driver = daemon.GraphDriverName() + + return base, err +} + +// GetByName returns a container given a name. +func (daemon *Daemon) GetByName(name string) (*container.Container, error) { + if len(name) == 0 { + return nil, fmt.Errorf("No container name supplied") + } + fullName := name + if name[0] != '/' { + fullName = "/" + name + } + id, err := daemon.nameIndex.Get(fullName) + if err != nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(id) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", id) + } + return e, nil +} + +// SubscribeToEvents returns the current record of events and a channel to stream new events from. +func (daemon *Daemon) SubscribeToEvents(since, sinceNano int64, filter filters.Args) ([]eventtypes.Message, chan interface{}) { + ef := events.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, sinceNano, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. 
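generateNewName above tries a handful of generated names before falling back to the truncated container ID. A compact sketch of that reserve-with-retry loop; the registrar map and generator function are stand-ins for the real registrar and namesgenerator packages:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNameReserved = errors.New("name is reserved")

    type registrar map[string]string

    func (r registrar) reserve(name, id string) error {
        if _, taken := r[name]; taken {
            return errNameReserved
        }
        r[name] = id
        return nil
    }

    func newName(r registrar, id string, gen func(retry int) string) (string, error) {
        for i := 0; i < 6; i++ {
            name := "/" + gen(i)
            if err := r.reserve(name, id); err == nil {
                return name, nil
            } else if err != errNameReserved {
                return "", err
            }
        }
        name := "/" + id[:12] // last resort: the truncated container ID
        return name, r.reserve(name, id)
    }

    func main() {
        r := registrar{"/boring_pare": "someone-else"}
        gen := func(int) string { return "boring_pare" } // always collides here
        fmt.Println(newName(r, "8dfafdbc3a40b2d8a2b85dbc9cf7853a", gen)) // /8dfafdbc3a40 <nil>
    }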
+func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// GetLabels for a container or image id +func (daemon *Daemon) GetLabels(id string) map[string]string { + // TODO: TestCase + container := daemon.containers.Get(id) + if container != nil { + return container.Config.Labels + } + + img, err := daemon.GetImage(id) + if err == nil { + return img.ContainerConfig.Labels + } + return nil +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *Config, registryService *registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + setupDumpStackTrap() + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := tempDir(config.Root, rootUID, rootGID) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{configStore: config} + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + // Set the default isolation mode (only applicable on Windows) + if err := 
d.setDefaultIsolation(); err != nil { + return nil, fmt.Errorf("error setting default isolation mode: %v", err) + } + + // Verify logging driver type + if config.LogConfig.Type != "none" { + if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil { + return nil, fmt.Errorf("error finding the logging driver: %v", err) + } + } + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + installDefaultAppArmorProfile() + daemonRepo := filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: driverName, + GraphDriverOptions: config.GraphOptions, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + return nil, err + } + + graphDriver := d.layerStore.DriverName() + imageRoot := filepath.Join(config.Root, "image", graphDriver) + + // Configure and validate the kernel's security support + if err := configureKernelSecuritySupport(config, graphDriver); err != nil { + return nil, err + } + + d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, maxDownloadConcurrency) + d.uploadManager = xfer.NewLayerUploadManager(maxUploadConcurrency) + + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + d.imageStore, err = image.NewImageStore(ifs, d.layerStore) + if err != nil { + return nil, err + } + + // Configure the volumes driver + volStore, err := configureVolumes(config, rootUID, rootGID) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700); err != nil { + return nil, err + } + + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) + if err != nil { + return nil, err + } + + eventsService := events.New() + + referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + + if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil { + return nil, fmt.Errorf("Couldn't restore custom images: %s", err) + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is a hard requirement for container security, + // on Linux/FreeBSD. 
+ if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled { + return nil, fmt.Errorf("Devices cgroup isn't mounted") + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = container.NewMemoryStore() + d.execCommands = exec.NewStore() + d.referenceStore = referenceStore + d.distributionMetadataStore = distributionMetadataStore + d.trustKey = trustKey + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.statsCollector = d.newStatsCollector(1 * time.Second) + d.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + d.RegistryService = registryService + d.EventsService = eventsService + d.volumes = volStore + d.root = config.Root + d.uidMaps = uidMaps + d.gidMaps = gidMaps + d.seccompEnabled = sysInfo.Seccomp + + d.nameIndex = registrar.NewRegistrar() + d.linkIndex = newLinkIndex() + + go d.execCommandGC() + + d.containerd, err = containerdRemote.Client(d) + if err != nil { + return nil, err + } + + if err := d.restore(); err != nil { + return nil, err + } + + return d, nil +} + +func (daemon *Daemon) shutdownContainer(c *container.Container) error { + // TODO(windows): Handle docker restart with paused containers + if c.IsPaused() { + // To terminate a process in freezer cgroup, we should send + // SIGTERM to this process then unfreeze it, and the process will + // then be forced to terminate immediately. + logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) + sig, ok := signal.SignalMap["TERM"] + if !ok { + return fmt.Errorf("System does not support SIGTERM") + } + if err := daemon.kill(c, int(sig)); err != nil { + return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) + } + if err := daemon.containerUnpause(c); err != nil { + return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) + } + if _, err := c.WaitStop(10 * time.Second); err != nil { + logrus.Debugf("container %s failed to exit within 10 seconds of SIGTERM, sending SIGKILL to force", c.ID) + sig, ok := signal.SignalMap["KILL"] + if !ok { + return fmt.Errorf("System does not support SIGKILL") + } + if err := daemon.kill(c, int(sig)); err != nil { + logrus.Errorf("Failed to SIGKILL container %s", c.ID) + } + c.WaitStop(-1 * time.Second) + return err + } + } + // If the container failed to exit within 10 seconds of SIGTERM, force it to stop + if err := daemon.containerStop(c, 10); err != nil { + return fmt.Errorf("Stop container %s with error: %v", c.ID, err) + } + + c.WaitStop(-1 * time.Second) + return nil +} + +// Shutdown stops the daemon. +func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + if daemon.containers != nil { + logrus.Debug("starting clean shutdown of all containers...") + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.layerStore != nil { + if err := daemon.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v", err) + } + } + + if err := daemon.cleanupMounts(); err != nil { + return err + } + + return nil +} + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) 
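shutdownContainer above escalates from SIGTERM to SIGKILL once a grace period expires. A self-contained sketch of that escalation against an ordinary child process; the sleep command and the two-second grace are arbitrary choices for the demo:

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    func stop(cmd *exec.Cmd, grace time.Duration) {
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()

        cmd.Process.Signal(syscall.SIGTERM) // polite first
        select {
        case <-done:
            return // exited within the grace period
        case <-time.After(grace):
            fmt.Println("grace period expired, sending SIGKILL")
            cmd.Process.Kill()
            <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        stop(cmd, 2*time.Second)
    }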
+func (daemon *Daemon) Mount(container *container.Container) error { + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. + if container.BaseFS != "" && runtime.GOOS != "windows" { + daemon.Unmount(container) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + } + } + container.BaseFS = dir // TODO: combine these fields + return nil +} + +// Unmount unsets the container base filesystem +func (daemon *Daemon) Unmount(container *container.Container) error { + if err := container.RWLayer.Unmount(); err != nil { + logrus.Errorf("Error unmounting container %s: %s", container.ID, err) + return err + } + return nil +} + +func (daemon *Daemon) kill(c *container.Container, sig int) error { + return daemon.containerd.Signal(c.ID, sig) +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.unsubscribe(c, ch) +} + +func (daemon *Daemon) changes(container *container.Container) ([]archive.Change, error) { + return container.RWLayer.Changes() +} + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(newTag reference.Named, imageName string) error { + imageID, err := daemon.GetImageID(imageName) + if err != nil { + return err + } + if err := daemon.referenceStore.AddTag(newTag, imageID, true); err != nil { + return err + } + + daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") + return nil +} + +func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. 
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + writeDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePullConfig := &distribution.ImagePullConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: daemon.imageStore, + ReferenceStore: daemon.referenceStore, + DownloadManager: daemon.downloadManager, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// PullOnBuild tells Docker to pull image referenced by `name`. +func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + ref = reference.WithDefaultTag(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config file, we prefer to use that + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig( + authConfigs, + repoInfo.Index, + ) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.PullImage(ctx, ref, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) + return imageExporter.Save(names, outStream) +} + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + writeDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePushConfig := &distribution.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + LayerStore: daemon.layerStore, + ImageStore: daemon.imageStore, + ReferenceStore: daemon.referenceStore, + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err := distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. 
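PullImage and PushImage share the buffered progress plumbing above. The sketch below isolates the pattern: the consumer cancels the transfer when the client goes away, but keeps draining the channel so the producer's sends never deadlock. All names and messages here are illustrative, not the real distribution APIs:

    package main

    import (
        "context"
        "fmt"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        progress := make(chan string, 100) // buffer shields the transfer from a slow client
        writesDone := make(chan struct{})

        go func() { // stands in for writeDistributionProgress
            defer close(writesDone)
            cancelled := false
            for msg := range progress {
                if cancelled {
                    continue // keep draining so the producer never blocks on send
                }
                if msg == "client hung up" { // stands in for a failed client write
                    cancel()
                    cancelled = true
                    continue
                }
                fmt.Println(msg)
            }
        }()

        // stands in for distribution.Pull; the real producer also watches ctx
        for _, msg := range []string{"layer 1: pulling", "client hung up", "layer 2: pulling"} {
            progress <- msg
        }
        close(progress)
        <-writesDone
        fmt.Println("cancelled:", ctx.Err() != nil)
    }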
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, ref.String()) + case reference.Canonical: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) + return imageExporter.Load(inTar, outStream, quiet) +} + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) 
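// Each entry is prepended rather than appended, so the finished slice is
// ordered newest-first, matching the order `docker history` prints.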
+ } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + + return history, nil +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + id, ref, err := reference.ParseIDOrReference(refOrID) + if err != nil { + return "", err + } + if id != "" { + if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } + return image.ID(id), nil + } + + if id, err := daemon.referenceStore.Get(ref); err == nil { + return id, nil + } + if tagged, ok := ref.(reference.NamedTagged); ok { + if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { + for _, namedRef := range daemon.referenceStore.References(id) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. +func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} + +// GetImageOnBuild looks up a Docker image referenced by `name`. +func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + return img, nil +} + +// GraphDriverName returns the name of the graph driver used by the layer.Store +func (daemon *Daemon) GraphDriverName() string { + return daemon.layerStore.DriverName() +} + +// GetUIDGIDMaps returns the current daemon's user namespace settings +// for the full uid and gid maps which will be applied to containers +// started in this instance. +func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { + return daemon.uidMaps, daemon.gidMaps +} + +// GetRemappedUIDGID returns the current daemon's uid and gid values +// if user namespaces are in use for this daemon instance. If not +// this function will return "real" root values of 0, 0. +func (daemon *Daemon) GetRemappedUIDGID() (int, int) { + uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + return uid, gid +} + +// GetCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. 
+func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) +} + +// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` +// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. +func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) { + cache, err := daemon.GetCachedImage(image.ID(imgID), cfg) + if cache == nil || err != nil { + return "", err + } + return cache.ID().String(), nil +} + +// tempDir returns the default directory to use for temporary files. +func tempDir(rootDir string, rootUID, rootGID int) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + // make sure links is not nil + // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links + if hostConfig.Links == nil { + hostConfig.Links = []string{} + } + + container.HostConfig = hostConfig + return container.ToDisk() +} + +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return setupInitLayer(initPath, rootUID, rootGID) +} + +func setDefaultMtu(config *Config) { + // do nothing if the config does not have the default 0 value. + if config.Mtu != 0 { + return + } + config.Mtu = defaultNetworkMtu +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. 
+func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + } + + if hostConfig == nil { + return nil, nil + } + + logCfg := daemon.getLogConfig(hostConfig.LogConfig) + if err := logger.ValidateLogOpts(logCfg.Type, logCfg.Config); err != nil { + return nil, err + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort) + } + } + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} + +// Checks if the client set configurations for more than one network while creating a container +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 { + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return errors.NewBadRequestError(err) +} + +func configureVolumes(config *Config, rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(config.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + return store.New(config.Root) +} + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(authConfig, dockerversion.DockerUserAgent(ctx)) +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. +func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, term string, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + return daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + return stats, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. 
+func (daemon *Daemon) newBaseContainer(id string) *container.Container { + return container.NewBaseContainer(id, daemon.containerRoot(id)) +} + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// These are the settings that Reload changes: +// - Daemon labels. +// - Cluster discovery (reconfigure and restart). +func (daemon *Daemon) Reload(config *Config) error { + daemon.configStore.reloadLock.Lock() + defer daemon.configStore.reloadLock.Unlock() + if config.IsValueSet("labels") { + daemon.configStore.Labels = config.Labels + } + if config.IsValueSet("debug") { + daemon.configStore.Debug = config.Debug + } + return nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} + +func isBridgeNetworkDisabled(config *Config) bool { + return config.bridgeConfig.Iface == disableNetworkBridge +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_experimental.go b/vendor/github.com/docker/docker/daemon/daemon_experimental.go new file mode 100644 index 00000000..3fd0e765 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_experimental.go @@ -0,0 +1,9 @@ +// +build experimental + +package daemon + +import "github.com/docker/engine-api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go b/vendor/github.com/docker/docker/daemon/daemon_linux.go new file mode 100644 index 00000000..9bdf6e2b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}"
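// With an empty id (the cleanupMounts path above), the 64-hex-digit pattern
// stands in for any full container ID, so the patterns below sweep leftover
// shm and graph-driver mounts for every container under daemon.root.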
+ patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_stub.go b/vendor/github.com/docker/docker/daemon/daemon_stub.go new file mode 100644 index 00000000..40e8ddc8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_stub.go @@ -0,0 +1,9 @@ +// +build !experimental + +package daemon + +import "github.com/docker/engine-api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go new file mode 100644 index 00000000..1e2e5ceb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unix.go @@ -0,0 +1,955 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + pblkiodev "github.com/docker/engine-api/types/blkiodev" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/opencontainers/specs/specs-go" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not kernel limit, we want this 4M limit to supply a reasonable functional container + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constant for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.Memory { + memory := specs.Memory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap != 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness := uint64(*config.MemorySwappiness) + memory.Swappiness = &swappiness + } + + if config.KernelMemory != 0 { + kernelMemory := uint64(config.KernelMemory) + memory.Kernel = &kernelMemory + } + + return &memory +} + +func getCPUResources(config containertypes.Resources) *specs.CPU { + cpu := specs.CPU{} + + if config.CPUShares != 0 { + shares := uint64(config.CPUShares) + cpu.Shares = &shares + } + + if config.CpusetCpus != "" { + cpuset := config.CpusetCpus + cpu.Cpus = &cpuset + } + + if 
config.CpusetMems != "" { + cpuset := config.CpusetMems + cpu.Mems = &cpuset + } + + if config.CPUPeriod != 0 { + period := uint64(config.CPUPeriod) + cpu.Period = &period + } + + if config.CPUQuota != 0 { + quota := uint64(config.CPUQuota) + cpu.Quota = &quota + } + + return &cpu +} + +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { + var stat syscall.Stat_t + var blkioWeightDevices []specs.WeightDevice + + for _, weightDevice := range config.BlkioWeightDevice { + if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + return nil, err + } + weight := weightDevice.Weight + d := specs.WeightDevice{Weight: &weight} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWeightDevices = append(blkioWeightDevices, d) + } + + return blkioWeightDevices, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + if opt == "no-new-privileges" { + container.NoNewPrivileges = true + } else { + var con []string + if strings.Contains(opt, "=") { + con = strings.SplitN(opt, "=", 2) + } else if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + logrus.Warnf("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.13, use `=` instead.") + } + + if len(con) != 2 { + return fmt.Errorf("Invalid --security-opt 1: %q", opt) + } + + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + case "seccomp": + container.SeccompProfile = con[1] + default: + return fmt.Errorf("Invalid --security-opt 2: %q", opt) + } + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func getBlkioReadIOpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioReadIOpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, iopsDevice := range config.BlkioDeviceReadIOps { + if err := syscall.Stat(iopsDevice.Path, &stat); err != nil { + return nil, err + } + rate := iopsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioReadIOpsDevice = append(blkioReadIOpsDevice, d) + } + + return blkioReadIOpsDevice, nil +} + +func getBlkioWriteIOpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioWriteIOpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, iopsDevice := range config.BlkioDeviceWriteIOps { + if err := syscall.Stat(iopsDevice.Path, &stat); err != nil { + return nil, err + } + rate := iopsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWriteIOpsDevice = append(blkioWriteIOpsDevice, d) + } + + return blkioWriteIOpsDevice, nil +} + +func getBlkioReadBpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioReadBpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, bpsDevice := range config.BlkioDeviceReadBps { + if err := syscall.Stat(bpsDevice.Path, &stat); err != nil { + return nil, err + } + rate := bpsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioReadBpsDevice = append(blkioReadBpsDevice, d) + } + + return blkioReadBpsDevice, nil +} + +func 
getBlkioWriteBpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioWriteBpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, bpsDevice := range config.BlkioDeviceWriteBps { + if err := syscall.Stat(bpsDevice.Path, &stat); err != nil { + return nil, err + } + rate := bpsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWriteBpsDevice = append(blkioWriteBpsDevice, d) + } + + return blkioWriteBpsDevice, nil +} + +func checkKernelVersion(k, major, minor int) bool { + if v, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("%s", err) + } else { + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + if !checkKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. 
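// MemorySwap is the combined memory-plus-swap limit, so doubling Memory
// grants as much swap as memory. An explicit -1 (unlimited swap) is left
// alone, since this branch only runs when MemorySwap is 0.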
+ hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + var err error + if hostConfig.SecurityOpt == nil { + hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) + if err != nil { + return err + } + } + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. 
Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.") + logrus.Warnf("Your kernel does not support OomKillDisable, OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities, pids limit discarded.") + logrus.Warnf("Your kernel does not support pids limit capabilities, pids limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") + resources.CPUQuota = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. 
Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.") + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device.") + logrus.Warnf("Your kernel does not support Block I/O weight_device. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support Block read limit in bytes per second.") + logrus.Warnf("Your kernel does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support Block write limit in bytes per second.") + logrus.Warnf("Your kernel does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support Block read limit in IO per second.") + logrus.Warnf("Your kernel does not support Block I/O read limit in IO per second. -device-read-iops discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support Block write limit in IO per second.") + logrus.Warnf("Your kernel does not support Block I/O write limit in IO per second. --device-write-iops discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. 
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + if err != nil { + return warnings, err + } + warnings = append(warnings, w...) + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size must be greater than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000].", hostConfig.OomScoreAdj) + } + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + if hostConfig.ReadonlyRootfs { + return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + return warnings, nil +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + // Check for mutually incompatible config options + if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") + } + if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { + config.bridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(config); err != nil { + return err + } + if config.CgroupParent != "" && UsingSystemd(config) { + if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + if config.EnableSelinuxSupport { + if selinuxEnabled() { + // As Docker on overlayFS and SELinux are incompatible at present, error on overlayfs being enabled + if driverName == "overlay" { + return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName) + } + logrus.Debug("SELinux enabled successfully") + } else { + logrus.Warn("Docker could not enable SELinux on the host system") + } + } else { + selinuxSetDisabled() + } + return nil +} + +// setupInitLayer populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func setupInitLayer(initLayer string, rootUID, rootGID int) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootUID, rootGID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. 
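// Of the entries above, only "/etc/mtab" has a type other than "dir" or
// "file": the default branch symlinks it to "/proc/mounts", so containers
// see live mount information without a writable mtab.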
+ return nil +} + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group +// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to lookup a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := user.LookupUid(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := user.LookupGid(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := user.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case-- if the username == "default", then we have been asked + // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := user.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupID = group.Gid + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := user.LookupGid(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := user.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, 
[]idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op + // effectively + logrus.Warnf("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) + // so that syscalls executing as non-root, operating on subdirectories of the graph root + // (e.g. mounted layers of a container) can traverse this path. + // The user namespace support will create subdirectories for the remapped root host uid:gid + // pair owned by that same uid:gid pair for proper write access to those needed metadata and + // layer content subtrees. + if _, err := os.Stat(rootDir); err == nil { + // root current exists; verify the access bits are correct by setting them + if err = os.Chmod(rootDir, 0711); err != nil { + return err + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0711 with root:root ownership + if err := os.MkdirAll(rootDir, 0711); err != nil { + return err + } + } + + // if user namespaces are enabled we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + // Create the root directory if it doesn't exists + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + } + return nil +} + +// registerLinks writes the links to a file. 
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := runconfigopts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + //An error from daemon.GetContainer() means this name could not be found + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + return container.WriteHostConfig() +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { + // Unix has no custom images to register + return nil +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { + s.MemoryStats.Limit = 
daemon.statsCollector.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read = time.Unix(int64(stats.Timestamp), 0) + return s, nil +} + +// setDefaultIsolation determines the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go new file mode 100644 index 00000000..987528f4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go new file mode 100644 index 00000000..c4a11b07 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + psignal "github.com/docker/docker/pkg/signal" +) + +func setupDumpStackTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + psignal.DumpStacks() + } + }() +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go new file mode 100644 index 00000000..fef1bd77 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows + +package daemon + +func setupDumpStackTrap() { + return +} diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go new file mode 100644 index 00000000..ec9d5c5f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/delete.go @@ -0,0 +1,157 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/errors" + "github.com/docker/docker/layer" + volumestore "github.com/docker/docker/volume/store" + "github.com/docker/engine-api/types" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. 
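// SetRemovalInProgress reports whether the flag was already set, so a
// second concurrent ContainerRm for the same container returns nil
// immediately rather than racing the removal already underway.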
+ if inProgress := container.SetRemovalInProgress(); inProgress { + return nil + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove) + if err == nil || config.ForceRemove { + if e := daemon.removeMountPoints(container, config.RemoveVolume); e != nil { + logrus.Error(e) + } + } + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) { + if container.IsRunning() { + if !forceRemove { + err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) + return errors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.stopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. + if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // If force removal is required, delete container from various + // indexes even if removal failed. 
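// The deferred closure below inspects the named return value err once
// cleanupContainer finishes: the name, link, and ID indexes are dropped
// whenever removal succeeded or the caller passed ForceRemove.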
+ defer func() { + if err == nil || forceRemove { + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + daemon.LogContainerEvent(container, "destroy") + } + }() + + if err = os.RemoveAll(container.Root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + } + } + + return nil +} + +// VolumeRm removes the volume with the given name. +// If the volume is referenced by a container it is not removed +// This is called directly from the remote API +func (daemon *Daemon) VolumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + if volumestore.IsInUse(err) { + err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) + return errors.NewRequestConflictError(err) + } + return fmt.Errorf("Error while removing volume %s: %v", name, err) + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go new file mode 100644 index 00000000..131c9a1e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/errors.go @@ -0,0 +1,57 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/errors" + "github.com/docker/docker/reference" +) + +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + if strings.Contains(dne.RefOrID, "@") { + e := fmt.Errorf("No such image: %s", dne.RefOrID) + return errors.NewRequestNotFoundError(e) + } + tag := reference.DefaultTag + ref, err := reference.ParseNamed(dne.RefOrID) + if err != nil { + e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) + return errors.NewRequestNotFoundError(e) + } + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) + return errors.NewRequestNotFoundError(e) + } + return err +} + +type errNotRunning struct { + containerID string +} + +func (e errNotRunning) Error() string { + return fmt.Sprintf("Container %s is not running", e.containerID) +} + +func (e errNotRunning) ContainerIsRunning() bool { + return false +} + +func errContainerIsRestarting(containerID string) error { + err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errors.NewRequestConflictError(err) +} + +func errExecNotFound(id string) error { + err := fmt.Errorf("No such exec instance '%s' found in daemon", id) + return errors.NewRequestNotFoundError(err) +} + +func errExecPaused(id string) error { + err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go new file mode 100644 
index 00000000..7ffffbb6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" + "github.com/docker/engine-api/types/events" +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} diff --git a/vendor/github.com/docker/docker/daemon/events/events.go b/vendor/github.com/docker/docker/daemon/events/events.go new file mode 100644 index 00000000..ac1c98cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/events.go @@ -0,0 +1,142 @@ +package events + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/pubsub" + eventtypes "github.com/docker/engine-api/types/events" +) + +const ( + eventsLimit = 64 + bufferSize = 1024 +) + +// Events is pubsub channel for events generated by the engine. 
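A hedged sketch of how this pubsub type is driven end to end; the names and the single-event flow are assumptions, with eventtypes standing for "github.com/docker/engine-api/types/events":

    ev := events.New()
    buffered, ch, cancel := ev.Subscribe() // up to 64 previously logged messages
    defer cancel()
    _ = buffered
    ev.Log("create", eventtypes.ContainerEventType, eventtypes.Actor{ID: "abc123"})
    msg := (<-ch).(eventtypes.Message) // the channel carries interface{}
    fmt.Println(msg.Action, msg.ID)    // "create abc123"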
+type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion), and a function to call +// to stop the stream of events. +func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion). +func (e *Events) SubscribeTopic(since, sinceNano int64, ef *Filter) ([]eventtypes.Message, chan interface{}) { + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, sinceNano, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts listener from pubsub +func (e *Events) Evict(l chan interface{}) { + e.pub.Evict(l) +} + +// Log broadcasts event to listeners. Each listener has 100 millisecond for +// receiving event or it will be skipped. +func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted before a specific date. +// The date is splitted in two values: +// - the `since` argument is a date timestamp without nanoseconds, or -1 to return an empty slice. +// - the `sinceNano` argument is the nanoseconds offset from the timestamp. +// It uses `time.Unix(seconds, nanoseconds)` to generate a valid date with those two first arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. 
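A small worked example of the since/sinceNano split described above, using only the standard library; the concrete timestamp is arbitrary:

    t := time.Date(2016, time.June, 1, 12, 0, 0, 500, time.UTC)
    since := t.Unix()                  // 1464782400: whole seconds only
    sinceNano := int64(t.Nanosecond()) // 500: the sub-second remainder
    same := time.Unix(since, sinceNano).Equal(t) // true: the pair round-trips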
+func (e *Events) loadBufferedEvents(since, sinceNano int64, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since == -1 { + return buffered + } + + sinceNanoUnix := time.Unix(since, sinceNano).UnixNano() + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + if ev.TimeNano < sinceNanoUnix { + break + } + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) + } + } + return buffered +} diff --git a/vendor/github.com/docker/docker/daemon/events/filter.go b/vendor/github.com/docker/docker/daemon/events/filter.go new file mode 100644 index 00000000..8936e371 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/filter.go @@ -0,0 +1,82 @@ +package events + +import ( + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// Filter can filter out docker events from a stream +type Filter struct { + filter filters.Args +} + +// NewFilter creates a new Filter +func NewFilter(filter filters.Args) *Filter { + return &Filter{filter: filter} +} + +// Include returns true when the event ev is included by the filters +func (ef *Filter) Include(ev events.Message) bool { + return ef.filter.ExactMatch("event", ev.Action) && + ef.filter.ExactMatch("type", ev.Type) && + ef.matchContainer(ev) && + ef.matchVolume(ev) && + ef.matchNetwork(ev) && + ef.matchImage(ev) && + ef.matchLabels(ev.Actor.Attributes) +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. 
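A hedged sketch of building and applying this filter from the caller's side; the filters package is "github.com/docker/engine-api/types/filters", and msg stands in for an incoming events.Message:

    args := filters.NewArgs()
    args.Add("type", "container") // keep only container events
    args.Add("label", "env=prod") // and only actors carrying this label
    f := NewFilter(args)
    keep := f.Include(msg) // true only when every filter above matches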
+func (ef *Filter) matchImage(ev events.Message) bool { + id := ev.Actor.ID + nameAttr := "image" + var imageName string + + if ev.Type == events.ImageEventType { + nameAttr = "name" + } + + if n, ok := ev.Actor.Attributes[nameAttr]; ok { + imageName = n + } + return ef.filter.ExactMatch("image", id) || + ef.filter.ExactMatch("image", imageName) || + ef.filter.ExactMatch("image", stripTag(id)) || + ef.filter.ExactMatch("image", stripTag(imageName)) +} + +func stripTag(image string) string { + ref, err := reference.ParseNamed(image) + if err != nil { + return image + } + return ref.Name() +} diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go new file mode 100644 index 00000000..be06845c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -0,0 +1,246 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/errors" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/strslice" +) + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via remote API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. 
+ + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. +func (d *Daemon) ContainerExecCreate(config *types.ExecConfig) (string, error) { + container, err := d.getActiveContainer(config.Container) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + logrus.Warnf("Wrong escape keys provided (%s, error: %s) using default : ctrl-p ctrl-q", config.DetachKeys, err.Error()) + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = container.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + if len(execConfig.User) == 0 { + execConfig.User = container.Config.User + } + + d.registerExecCommand(container, execConfig) + + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. 
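An illustrative end-to-end sketch of the create/start pair, assuming a *Daemon d and a running container named "web" (both hypothetical):

    id, err := d.ContainerExecCreate(&types.ExecConfig{
        Container:    "web",
        Cmd:          []string{"ps", "aux"},
        AttachStdout: true,
    })
    if err == nil {
        // no stdin; stream the command's output straight to our stdout/stderr
        err = d.ContainerExecStart(id, nil, os.Stdout, os.Stderr)
    }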
+func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + defer func() { + if err != nil { + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + } + }() + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.NewInputPipes() + } else { + ec.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return nil + } + + attachErr := container.AttachStreams(context.Background(), ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) + + if err := d.containerd.AddProcess(c.ID, name, p); err != nil { + return err + } + + err = <-attachErr + if err != nil { + return fmt.Errorf("attach failed with error: %v", err) + } + return nil +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.Commands() { + if config.CanRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.CanRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. +func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.ExecCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go new file mode 100644 index 00000000..bbeb1c16 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -0,0 +1,93 @@ +package exec + +import ( + "sync" + + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. 
+type Config struct { + sync.Mutex + *runconfig.StreamConfig + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string +} + +// NewConfig initializes the a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: runconfig.NewStreamConfig(), + } +} + +// Store keeps track of the exec configurations. +type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config, 0)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.commands[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go new file mode 100644 index 00000000..a2c86b28 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_linux.go @@ -0,0 +1,26 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &libcontainerd.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go new file mode 100644 index 00000000..80d7dbb2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -0,0 +1,55 @@ +package daemon + +import ( + "fmt" + "io" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. 
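A minimal caller-side sketch of the export API; the daemon value d, the container name "web", and the target path are assumptions:

    f, err := os.Create("/tmp/web.tar")
    if err != nil {
        return err
    }
    defer f.Close()
    // streams an uncompressed tar of the container's root filesystem
    return d.ContainerExport("web", f)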
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	data, err := daemon.containerExport(container)
+	if err != nil {
+		return fmt.Errorf("Error exporting container %s: %v", name, err)
+	}
+	defer data.Close()
+
+	// Stream the entire contents of the container (basically a volatile snapshot)
+	if _, err := io.Copy(out, data); err != nil {
+		return fmt.Errorf("Error exporting container %s: %v", name, err)
+	}
+	return nil
+}
+
+func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) {
+	if err := daemon.Mount(container); err != nil {
+		return nil, err
+	}
+
+	uidMaps, gidMaps := daemon.GetUIDGIDMaps()
+	archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{
+		Compression: archive.Uncompressed,
+		UIDMaps:     uidMaps,
+		GIDMaps:     gidMaps,
+	})
+	if err != nil {
+		daemon.Unmount(container)
+		return nil, err
+	}
+	arch := ioutils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		daemon.Unmount(container)
+		return err
+	})
+	daemon.LogContainerEvent(container, "export")
+	return arch, err
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go
new file mode 100644
index 00000000..572fc9be
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go
@@ -0,0 +1,32 @@
+package graphdriver
+
+import "sync"
+
+// RefCounter is a generic counter for use by graphdriver Get/Put calls
+type RefCounter struct {
+	counts map[string]int
+	mu     sync.Mutex
+}
+
+// NewRefCounter returns a new RefCounter
+func NewRefCounter() *RefCounter {
+	return &RefCounter{counts: make(map[string]int)}
+}
+
+// Increment increases the ref count for the given id and returns the current count
+func (c *RefCounter) Increment(id string) int {
+	c.mu.Lock()
+	c.counts[id]++
+	count := c.counts[id]
+	c.mu.Unlock()
+	return count
+}
+
+// Decrement decreases the ref count for the given id and returns the current count
+func (c *RefCounter) Decrement(id string) int {
+	c.mu.Lock()
+	c.counts[id]--
+	count := c.counts[id]
+	c.mu.Unlock()
+	return count
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go
new file mode 100644
index 00000000..ced960b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go
@@ -0,0 +1,234 @@
+package graphdriver
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+)
+
+// FsMagic unsigned id of the filesystem in use.
+type FsMagic uint32
+
+const (
+	// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
+	FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+	// All registered drivers
+	drivers map[string]InitFunc
+
+	// ErrNotSupported returned when driver is not supported.
+	ErrNotSupported = errors.New("driver not supported")
+	// ErrPrerequisites returned when driver does not meet prerequisites.
+	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+	// ErrIncompatibleFS returned when file system is not supported.
+	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
+)
+
+// InitFunc initializes the storage driver.
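A sketch of the intended Get/Put pairing for the RefCounter defined above; the layer id is hypothetical:

    ctr := graphdriver.NewRefCounter()
    if ctr.Increment("layer-1") == 1 {
        // first reference: perform the actual mount here
    }
    // ... later ...
    if ctr.Decrement("layer-1") == 0 {
        // last reference released: safe to unmount
    }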
+type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // Create creates a new, empty, filesystem layer with the + // specified id and parent and mountLabel. Parent and mountLabel may be "". + Create(id, parent, mountLabel string) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (archive.Archive, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers a InitFunc for the driver. 
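For illustration, this is the registration pattern a concrete driver follows (the overlay driver later in this diff registers itself exactly this way); "mydriver" and Init are placeholders:

    func init() {
        // Init must satisfy graphdriver.InitFunc
        graphdriver.Register("mydriver", Init)
    }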
+func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + if pluginDriver, err := lookupPlugin(name, home, options); err == nil { + return pluginDriver, nil + } + logrus.Errorf("Failed to GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// New creates the driver and initializes it at the specified root. +func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver + return GetDriver(name, root, options, uidMaps, gidMaps) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver %q", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// isDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. 
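A hedged sketch of the selection logic from the caller's perspective; the root path is the conventional one and is an assumption here:

    // an empty name lets New probe prior on-disk state, then the priority list
    drv, err := graphdriver.New("/var/lib/docker", "", nil, nil, nil)
    if err != nil {
        logrus.Fatalf("no usable storage backend: %v", err)
    }
    logrus.Infof("selected graphdriver: %s", drv.String())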
+func isDriverNotSupported(err error) bool { + return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + driversMap[driver] = true + } + } + return driversMap +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 00000000..2891a84f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,19 @@ +package graphdriver + +import "syscall" + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go new file mode 100644 index 00000000..2ab20b01 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -0,0 +1,99 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "devicemapper", + "overlay", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. 
+ FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 00000000..b3f68573 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 00000000..5a349325 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,162 @@ +package graphdriver + +import ( + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
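A short sketch combining GetFSMagic with the FsNames table above to report the backing filesystem; the probe path is an assumption:

    magic, err := graphdriver.GetFSMagic("/var/lib/docker/overlay")
    if err == nil {
        name, ok := graphdriver.FsNames[magic]
        if !ok {
            name = "unknown"
        }
        logrus.Debugf("backing filesystem: %s", name) // e.g. "extfs" or "xfs"
    }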
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debugf("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
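An illustrative sketch of how a minimal driver picks up these diff methods by wrapping; proto is a hypothetical graphdriver.ProtoDriver implementation and the ids are placeholders:

    drv := graphdriver.NewNaiveDiffDriver(proto, nil, nil) // nil maps: no id remapping
    size, err := drv.DiffSize("layer-def", "layer-abc")    // mounts both layers and walks the changes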
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	changes, err := gdw.Changes(id, parent)
+	if err != nil {
+		return
+	}
+
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer driver.Put(id)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go
new file mode 100644
index 00000000..7d81a83a
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go
@@ -0,0 +1,169 @@
+// +build linux
+
+package overlay
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+)
+
+type copyFlags int
+
+const (
+	copyHardlink copyFlags = 1 << iota
+)
+
+func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	_, err = pools.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func copyDir(srcDir, dstDir string, flags copyFlags) error {
+	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		if err != nil {
+			return err
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+		if err != nil {
+			return err
+		}
+
+		stat, ok := f.Sys().(*syscall.Stat_t)
+		if !ok {
+			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+		}
+
+		isHardlink := false
+
+		switch f.Mode() & os.ModeType {
+		case 0: // Regular file
+			if flags&copyHardlink != 0 {
+				isHardlink = true
+				if err := os.Link(srcPath, dstPath); err != nil {
+					return err
+				}
+			} else {
+				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
+					return err
+				}
+			}
+
+		case os.ModeDir:
+			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+				return err
+			}
+
+		case os.ModeSymlink:
+			link, err := os.Readlink(srcPath)
+			if err != nil {
+				return err
+			}
+
+			if err := os.Symlink(link, dstPath); err != nil {
+				return err
+			}
+
+		case os.ModeNamedPipe:
+			fallthrough
+		case os.ModeSocket:
+			if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
+				return err
+			}
+
+		case os.ModeDevice:
+			if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("Unknown file type for %s\n", srcPath)
+		}
+
+		// Everything below is copying metadata from src to dst. All this metadata
+		// already shares an inode for hardlinks.
+		if isHardlink {
+			return nil
+		}
+
+		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+			return err
+		}
+
+		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+			return err
+		}
+
+		// We need to copy this attribute if it appears in an overlay upper layer, as
+		// this function is used to copy those. It is set by overlay if a directory
+		// is removed and then re-created and should not inherit anything from the
+		// same dir in the lower dir.
+ if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 00000000..47cddbea --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,486 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "github.com/docker/containerd/subreaper/exec" + "path" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. 
The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When a overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with a empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + pathCacheLock sync.Mutex + pathCache map[string]string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +var backingFs = "" + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. 
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs or aufs + switch fsMagic { + case graphdriver.FsMagicBtrfs: + logrus.Error("'overlay' is not supported over btrfs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicAufs: + logrus.Error("'overlay' is not supported over aufs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicZfs: + logrus.Error("'overlay' is not supported over zfs.") + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + d := &Driver{ + home: home, + pathCache: make(map[string]string), + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(), + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + } +} + +// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup simply returns nil and do not change the existing filesystem. +// This is required to satisfy the graphdriver.Driver interface. +func (d *Driver) Cleanup() error { + return nil +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. 
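An illustrative create-then-inspect sketch against the driver above; drv and the layer ids are assumptions:

    if err := drv.Create("layer-def", "layer-abc", ""); err != nil {
        return err
    }
    md, err := drv.GetMetadata("layer-def")
    if err == nil {
        logrus.Debugf("lower=%s upper=%s work=%s merged=%s",
            md["LowerDir"], md["UpperDir"], md["WorkDir"], md["MergedDir"])
    }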
+func (d *Driver) Create(id, parent, mountLabel string) (retErr error) { + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do a overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + d.pathCacheLock.Lock() + delete(d.pathCache, id) + d.pathCacheLock.Unlock() + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
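A minimal usage sketch of the Get/Put pair that brackets every access to a layer's merged view; drv and the id are hypothetical:

    dir, err := drv.Get("layer-def", "") // mounts, or reuses via the ref counter
    if err != nil {
        return err
    }
    logrus.Debugf("rootfs mounted at %s", dir)
    return drv.Put("layer-def") // drop the reference; unmounts when it hits zero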
+func (d *Driver) Get(id string, mountLabel string) (string, error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return "", err
+	}
+
+	// If id has a root, just return it
+	rootDir := path.Join(dir, "root")
+	if _, err := os.Stat(rootDir); err == nil {
+		d.pathCacheLock.Lock()
+		d.pathCache[id] = rootDir
+		d.pathCacheLock.Unlock()
+		return rootDir, nil
+	}
+
+	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+	if err != nil {
+		return "", err
+	}
+	lowerDir := path.Join(d.dir(string(lowerID)), "root")
+	upperDir := path.Join(dir, "upper")
+	workDir := path.Join(dir, "work")
+	mergedDir := path.Join(dir, "merged")
+
+	if count := d.ctr.Increment(id); count > 1 {
+		return mergedDir, nil
+	}
+
+	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
+
+	// if it's mounted already, just return
+	mounted, err := d.mounted(mergedDir)
+	if err != nil {
+		d.ctr.Decrement(id)
+		return "", err
+	}
+	if mounted {
+		d.ctr.Decrement(id)
+		return mergedDir, nil
+	}
+
+	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
+		d.ctr.Decrement(id)
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+	}
+	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+	// user namespace requires this to move a directory from lower to upper.
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		d.ctr.Decrement(id)
+		syscall.Unmount(mergedDir, 0)
+		return "", err
+	}
+
+	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+		d.ctr.Decrement(id)
+		syscall.Unmount(mergedDir, 0)
+		return "", err
+	}
+
+	d.pathCacheLock.Lock()
+	d.pathCache[id] = mergedDir
+	d.pathCacheLock.Unlock()
+
+	return mergedDir, nil
+}
+
+func (d *Driver) mounted(dir string) (bool, error) {
+	return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir)
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+	if count := d.ctr.Decrement(id); count > 0 {
+		return nil
+	}
+	d.pathCacheLock.Lock()
+	mountpoint, exists := d.pathCache[id]
+	d.pathCacheLock.Unlock()
+
+	if !exists {
+		logrus.Debugf("Put on a non-mounted device %s", id)
+		// but it might be still here
+		if d.Exists(id) {
+			mountpoint = path.Join(d.dir(id), "merged")
+		}
+
+		d.pathCacheLock.Lock()
+		d.pathCache[id] = mountpoint
+		d.pathCacheLock.Unlock()
+	}
+
+	if mounted, err := d.mounted(mountpoint); mounted || err != nil {
+		if err = syscall.Unmount(mountpoint, 0); err != nil {
+			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+		}
+		return err
+	}
+	return nil
+}
+
+// ApplyDiff applies the new layer on top of the root; if the parent does not
+// exist it will return an ErrApplyDiffFallback error.
+func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
+	dir := d.dir(id)
+
+	if parent == "" {
+		return 0, ErrApplyDiffFallback
+	}
+
+	parentRootDir := path.Join(d.dir(parent), "root")
+	if _, err := os.Stat(parentRootDir); err != nil {
+		return 0, ErrApplyDiffFallback
+	}
+
+	// We now know there is a parent, and it has a "root" directory containing
+	// the full root filesystem. We can just hardlink it and apply the
+	// layer.
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 00000000..3dbb4de4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 00000000..d63161b0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,32 @@ +// +build experimental + +package graphdriver + +import ( + "fmt" + "io" + + "github.com/docker/docker/pkg/plugins" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. 
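+	// e.g. (illustrative) Call("GraphDriver.Create", &graphDriverRequest{ID: id}, &ret),
+	// which is how graphDriverProxy (proxy.go) drives an external driver.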
+ Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + pl, err := plugins.Get(name, "GraphDriver") + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, home, opts, pl.Client) +} + +func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { + proxy := &graphDriverProxy{name, c} + return proxy, proxy.Init(home, opts) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go new file mode 100644 index 00000000..daa7a170 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go @@ -0,0 +1,7 @@ +// +build !experimental + +package graphdriver + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + return nil, ErrNotSupported +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 00000000..28657fef --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,209 @@ +// +build experimental + +package graphdriver + +import ( + "errors" + "fmt" + + "github.com/docker/docker/pkg/archive" +) + +type graphDriverProxy struct { + name string + client pluginClient +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string +} + +func (d *graphDriverProxy) Init(home string, opts []string) error { + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Create(id, parent, mountLabel string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { 
+ return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return ret.Dir, err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.client.Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return archive.Archive(body), nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 00000000..3a952642 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 
00000000..98fad23b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go new file mode 100644 index 00000000..00d9f8ec --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,135 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. 
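+// Because vfs has no copy-on-write, creating a child layer copies the parent
+// in full; a hypothetical Create("child", "parent", "") is roughly equivalent to
+//
+//	cp -a <home>/dir/parent <home>/dir/child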
+func (d *Driver) Create(id, parent, mountLabel string) error {
+	dir := d.dir(id)
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return err
+	}
+	if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil {
+		return err
+	}
+	opts := []string{"level:s0"}
+	if _, mountLabel, err := label.InitLabels(opts); err == nil {
+		label.SetFileLabel(dir, mountLabel)
+	}
+	if parent == "" {
+		return nil
+	}
+	parentDir, err := d.Get(parent, "")
+	if err != nil {
+		return fmt.Errorf("%s: %s", parent, err)
+	}
+	if err := CopyWithTar(parentDir, dir); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *Driver) dir(id string) string {
+	return filepath.Join(d.home, "dir", filepath.Base(id))
+}
+
+// Remove deletes the content from the directory for a given id.
+func (d *Driver) Remove(id string) error {
+	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// Get returns the directory for the given id.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+	dir := d.dir(id)
+	if st, err := os.Stat(dir); err != nil {
+		return "", err
+	} else if !st.IsDir() {
+		return "", fmt.Errorf("%s: not a directory", dir)
+	}
+	return dir, nil
+}
+
+// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up.
+func (d *Driver) Put(id string) error {
+	// The vfs driver has no runtime resources (e.g. mounts)
+	// to clean up, so we don't need anything here
+	return nil
+}
+
+// Exists checks to see if the directory exists for the given id.
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Stat(d.dir(id))
+	return err == nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/image_delete.go b/vendor/github.com/docker/docker/daemon/image_delete.go
new file mode 100644
index 00000000..7c6329a6
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/image_delete.go
@@ -0,0 +1,371 @@
+package daemon
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/errors"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/reference"
+	"github.com/docker/engine-api/types"
+)
+
+type conflictType int
+
+const (
+	conflictDependentChild conflictType = (1 << iota)
+	conflictRunningContainer
+	conflictActiveReference
+	conflictStoppedContainer
+	conflictHard = conflictDependentChild | conflictRunningContainer
+	conflictSoft = conflictActiveReference | conflictStoppedContainer
+)
+
+// ImageDelete deletes the image referenced by the given imageRef from this
+// daemon. The given imageRef can be an image ID, ID prefix, or a repository
+// reference (with an optional tag or digest, defaulting to the tag name
+// "latest"). There is differing behavior depending on whether the given
+// imageRef is a repository reference or not.
+//
+// If the given imageRef is a repository reference then that repository
+// reference will be removed. However, if there exist any containers which
+// were created using the same image reference then the repository reference
+// cannot be removed unless either there are other repository references to the
+// same image or force is true.
Following removal of the repository reference, +// the referenced image itself will attempt to be deleted as described below +// but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { + records := []types.ImageDelete{} + + imgID, err := daemon.GetImageID(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.referenceStore.References(imgID) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !(force || len(repoRefs) > 1) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.referenceStore.References(imgID) + + // If this is a tag reference and all the remaining references + // to this image are digest references, delete the remaining + // references so that they don't prevent removal of the image. 
+ if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical { + foundTagRef = true + break + } + } + if !foundTagRef { + for _, repoRef := range repoRefs { + if _, err := daemon.removeImageRef(repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} + records = append(records, untaggedRecord) + } + repoRefs = []reference.Named{} + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is exactly one + // repository reference to the image then we will want to + // remove that reference. + // FIXME: Is this the behavior we want? + if len(repoRefs) == 1 { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + return nil, conflict + } + + parsedRef, err := daemon.removeImageRef(repoRefs[0]) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + + return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef) +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.WithDefaultTag(ref) + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.referenceStore.Delete(ref) + + return ref, err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDelete is added to the +// given list of records. 
+func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { + imageRefs := daemon.referenceStore.References(imgID) + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. +type imageDeleteConflict struct { + hard bool + used bool + imgID image.ID + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + // Ignore conflicts UNLESS the image is "dangling" or not being used in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. + return conflict + } + + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + return err + } + + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { + return err + } + + daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + } + + if !prune || parent == "" { + return nil + } + + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). 
+ return daemon.imageDeleteHelper(parent, records, false, true, true) +} + +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { + // Check if the image has any descendant images. + if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + return &imageDeleteConflict{ + hard: true, + imgID: imgID, + message: "image has dependent child images", + } + } + + if mask&conflictRunningContainer != 0 { + // Check if any running container is using the image. + running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), + } + } + } + + // Check if any repository tags/digest reference this image. + if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 { + return &imageDeleteConflict{ + imgID: imgID, + message: "image is referenced in one or more repositories", + } + } + + if mask&conflictStoppedContainer != 0 { + // Check if any stopped containers reference this image. + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.referenceStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +} diff --git a/vendor/github.com/docker/docker/daemon/images.go b/vendor/github.com/docker/docker/daemon/images.go new file mode 100644 index 00000000..e4c3797f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images.go @@ -0,0 +1,162 @@ +package daemon + +import ( + "fmt" + "path" + "sort" + + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. +type byCreated []*types.Image + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. 
filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by api/types/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. +func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + imageFilters, err := filters.FromParam(filterArgs) + if err != nil { + return nil, err + } + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { + return nil, err + } + + if imageFilters.Include("dangling") { + if imageFilters.ExactMatch("dangling", "true") { + danglingOnly = true + } else if !imageFilters.ExactMatch("dangling", "false") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + } + } + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + images := []*types.Image{} + + var filterTagged bool + if filter != "" { + filterRef, err := reference.ParseNamed(filter) + if err == nil { // parse error means wildcard repo + if _, ok := filterRef.(reference.NamedTagged); ok { + filterTagged = true + } + } + } + + for id, img := range allImages { + if imageFilters.Include("label") { + // Very old image that do not have image.Config (or even labels) + if img.Config == nil { + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.referenceStore.References(id) { + if filter != "" { // filter by tag/repo name + if filterTagged { // filter by tag, require full ref match + if ref.String() != filter { + continue + } + } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact + continue + } + } + if _, ok := ref.(reference.Canonical); ok { + newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + } + if _, ok := ref.(reference.NamedTagged); ok { + newImage.RepoTags = append(newImage.RepoTags, ref.String()) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + + if imageFilters.Include("dangling") && !danglingOnly { + //dangling=false case, so dangling image is not needed + continue + } + if filter != "" { // skip images with no references if filtering by tag + continue + } + newImage.RepoDigests = []string{"@"} + newImage.RepoTags = []string{":"} + } else { + continue + } + } else if danglingOnly { + continue + } + + images = append(images, newImage) + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +func newImage(image *image.Image, size int64) *types.Image { + newImage := new(types.Image) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = size + newImage.VirtualSize = size + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/docker/docker/daemon/import.go 
b/vendor/github.com/docker/docker/daemon/import.go new file mode 100644 index 00000000..4961a30f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/import.go @@ -0,0 +1,109 @@ +package daemon + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "runtime" + "time" + + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types/container" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *container.Config) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + rc io.ReadCloser + resp *http.Response + ) + + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressOutput := sf.NewProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + // TODO: support windows baselayer? 
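+	// Register the decompressed tar stream as a new layer; its DiffID becomes
+	// the single RootFS entry of the imported image built below.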
+ l, err := daemon.layerStore.Register(inflatedLayerData, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImage(newRef, id.String()); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(sf.FormatStatus("", id.String())) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go new file mode 100644 index 00000000..c94cf9ba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info.go @@ -0,0 +1,162 @@ +package daemon + +import ( + "os" + "runtime" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume/drivers" + "github.com/docker/engine-api/types" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server the daemon is running on. 
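+// The result backs `docker info`; an illustrative fragment of the response:
+//
+//	{"Containers": 3, "ContainersRunning": 1, "Driver": "overlay",
+//	 "DriverStatus": [["Backing Filesystem", "extfs"]]}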
+func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + } + + sysInfo := sysinfo.New(true) + + var cRunning, cPaused, cStopped int32 + daemon.containers.ApplyAll(func(c *container.Container) { + switch c.StateString() { + case "paused": + atomic.AddInt32(&cPaused, 1) + case "running": + atomic.AddInt32(&cRunning, 1) + default: + atomic.AddInt32(&cStopped, 1) + } + }) + + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: len(daemon.imageStore.Map()), + Driver: daemon.GraphDriverName(), + DriverStatus: daemon.layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: utils.IsDebugEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: runtime.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: utils.ExperimentalBuild(), + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + } + + // TODO Windows. Refactor this more once sysinfo is refactored into + // platform specific code. On Windows, sysinfo.cgroupMemInfo and + // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if + // an attempt is made to access through them. + if runtime.GOOS != "windows" { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + } + + if hostname, err := os.Hostname(); err == nil { + v.Name = hostname + } + + return v, nil +} + +// SystemVersion returns version information about the daemon. 
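+// It backs `docker version`; e.g. (illustrative)
+//
+//	{"Version": "1.11.2", "GoVersion": "go1.6.2", "Os": "linux", "Arch": "amd64"}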
+func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: utils.ExperimentalBuild(), + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + + pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + + return pluginsInfo +} diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go new file mode 100644 index 00000000..2f810e3e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect.go @@ -0,0 +1,245 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/versions/v1p20" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. +func (daemon *Daemon) ContainerInspect(name string, size bool, version version.Version) (interface{}, error) { + switch { + case version.LessThan("1.20"): + return daemon.containerInspectPre120(name) + case version.Equal("1.20"): + return daemon.containerInspect120(name) + } + return daemon.containerInspectCurrent(name, size) +} + +func (daemon *Daemon) containerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, size) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + Ports: container.NetworkSettings.Ports, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: container.NetworkSettings.Networks, + } + + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil +} + +// containerInspect120 serializes the master version of a container into a json type. 
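+// Dispatch is by API version (see ContainerInspect above), e.g. (illustrative)
+//
+//	GET /v1.19/containers/<id>/json -> containerInspectPre120
+//	GET /v1.20/containers/<id>/json -> containerInspect120
+//	GET /v1.23/containers/<id>/json -> containerInspectCurrent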
+func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // we need this trick to preserve empty log driver, so + // container will use daemon defaults even if daemon changes them + if hostConfig.LogConfig.Type == "" { + hostConfig.LogConfig.Type = daemon.defaultLogConfig.Type + } + + if len(hostConfig.LogConfig.Config) == 0 { + hostConfig.LogConfig.Config = daemon.defaultLogConfig.Config + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode, + Error: container.State.Error, + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + var ( + sizeRw int64 + sizeRootFs int64 + ) + if size { + sizeRw, sizeRootFs = daemon.getSize(container) + contJSONBase.SizeRw = &sizeRw + contJSONBase.SizeRootFs = &sizeRootFs + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + if err != nil { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. 
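+// e.g. (illustrative) this backs GET /exec/<id>/json.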
+func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e, err := daemon.getExecConfig(id) + if err != nil { + return nil, err + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + return volumeToAPIType(v), nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. +func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*networktypes.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_unix.go b/vendor/github.com/docker/docker/daemon/inspect_unix.go new file mode 100644 index 00000000..6033c02d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_unix.go @@ -0,0 +1,91 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/versions/v1p19" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, + User: e.User, + } +} diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go new file mode 100644 index 00000000..3967f0f2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/kill.go @@ -0,0 +1,153 @@ +package daemon + +import ( + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. +func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. 
This wrapper for the
+// host specific kill command prepares the container before attempting
+// to send the signal. An error is returned if the container is paused
+// or not running, or if there is a problem returned from the
+// underlying kill command.
+func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error {
+	logrus.Debugf("Sending %d to %s", sig, container.ID)
+	container.Lock()
+	defer container.Unlock()
+
+	// We could unpause the container for them rather than returning this error
+	if container.Paused {
+		return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
+	}
+
+	if !container.Running {
+		return errNotRunning{container.ID}
+	}
+
+	container.ExitOnNext()
+
+	if !daemon.IsShuttingDown() {
+		container.HasBeenManuallyStopped = true
+	}
+
+	// if the container is currently restarting we do not need to send the signal
+	// to the process. Telling the monitor that it should exit on its next event
+	// loop is enough
+	if container.Restarting {
+		return nil
+	}
+
+	if err := daemon.kill(container, sig); err != nil {
+		err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err)
+		// if the container or process does not exist, ignore the error
+		if strings.Contains(err.Error(), "container not found") ||
+			strings.Contains(err.Error(), "no such process") {
+			logrus.Warnf("%s", err.Error())
+		} else {
+			return err
+		}
+	}
+
+	attributes := map[string]string{
+		"signal": fmt.Sprintf("%d", sig),
+	}
+	daemon.LogContainerEventWithAttributes(container, "kill", attributes)
+	return nil
+}
+
+// Kill forcefully terminates a container.
+func (daemon *Daemon) Kill(container *container.Container) error {
+	if !container.IsRunning() {
+		return errNotRunning{container.ID}
+	}
+
+	// 1. Send SIGKILL
+	if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
+		// While normally we might "return err" here we're not going to
+		// because if we can't stop the container by this point then
+		// it's probably because it's already stopped. Meaning, between
+		// the time of the IsRunning() call above and now it stopped.
+		// Also, since the err return will be environment specific we can't
+		// look for any particular (common) error that would indicate
+		// that the process is already dead vs something else going wrong.
+		// So, instead we'll give it up to 2 more seconds to complete and if
+		// by that time the container is still running, then the error
+		// we got is probably valid and so we return it to the caller.
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+
+		if container.IsRunning() {
+			container.WaitStop(2 * time.Second)
+			if container.IsRunning() {
+				return err
+			}
+		}
+	}
+
+	// 2. Wait for the process to die; as a last resort, try to kill the process directly
+	if err := killProcessDirectly(container); err != nil {
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+		return err
+	}
+
+	container.WaitStop(-1 * time.Second)
+	return nil
+}
+
+// killPossiblyDeadProcess is a wrapper around killWithSignal() suppressing "no such process" errors.
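+// Illustrative flow: signalling an already-reaped process yields syscall.ESRCH,
+// which is translated into errNoSuchProcess so callers such as Kill above can
+// treat an already-dead process as success.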
+func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
+	err := daemon.killWithSignal(container, sig)
+	if err == syscall.ESRCH {
+		e := errNoSuchProcess{container.GetPID(), sig}
+		logrus.Debug(e)
+		return e
+	}
+	return err
+}
diff --git a/vendor/github.com/docker/docker/daemon/links.go b/vendor/github.com/docker/docker/daemon/links.go
new file mode 100644
index 00000000..7f691d4f
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/links.go
@@ -0,0 +1,87 @@
+package daemon
+
+import (
+	"sync"
+
+	"github.com/docker/docker/container"
+)
+
+// linkIndex stores link relationships between containers, including their specified alias
+// The alias is the name the parent uses to reference the child
+type linkIndex struct {
+	// idx maps a parent->alias->child relationship
+	idx map[*container.Container]map[string]*container.Container
+	// childIdx maps child->parent->aliases
+	childIdx map[*container.Container]map[*container.Container]map[string]struct{}
+	mu sync.Mutex
+}
+
+func newLinkIndex() *linkIndex {
+	return &linkIndex{
+		idx: make(map[*container.Container]map[string]*container.Container),
+		childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}),
+	}
+}
+
+// link adds indexes for the passed in parent/child/alias relationships
+func (l *linkIndex) link(parent, child *container.Container, alias string) {
+	l.mu.Lock()
+
+	if l.idx[parent] == nil {
+		l.idx[parent] = make(map[string]*container.Container)
+	}
+	l.idx[parent][alias] = child
+	if l.childIdx[child] == nil {
+		l.childIdx[child] = make(map[*container.Container]map[string]struct{})
+	}
+	if l.childIdx[child][parent] == nil {
+		l.childIdx[child][parent] = make(map[string]struct{})
+	}
+	l.childIdx[child][parent][alias] = struct{}{}
+
+	l.mu.Unlock()
+}
+
+// unlink removes the requested alias for the given parent/child
+func (l *linkIndex) unlink(alias string, child, parent *container.Container) {
+	l.mu.Lock()
+	delete(l.idx[parent], alias)
+	delete(l.childIdx[child], parent)
+	l.mu.Unlock()
+}
+
+// children maps all the aliases->children for the passed in parent
+// aliases here are the aliases the parent uses to refer to the child
+func (l *linkIndex) children(parent *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+	children := l.idx[parent]
+	l.mu.Unlock()
+	return children
+}
+
+// parents maps all the aliases->parent for the passed in child
+// aliases here are the aliases the parents use to refer to the child
+func (l *linkIndex) parents(child *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+
+	parents := make(map[string]*container.Container)
+	for parent, aliases := range l.childIdx[child] {
+		for alias := range aliases {
+			parents[alias] = parent
+		}
+	}
+
+	l.mu.Unlock()
+	return parents
+}
+
+// delete deletes all link relationships referencing this container
+func (l *linkIndex) delete(container *container.Container) {
+	l.mu.Lock()
+	for _, child := range l.idx[container] {
+		delete(l.childIdx[child], container)
+	}
+	delete(l.idx, container)
+	delete(l.childIdx, container)
+	l.mu.Unlock()
+}
diff --git a/vendor/github.com/docker/docker/daemon/links/links.go b/vendor/github.com/docker/docker/daemon/links/links.go
new file mode 100644
index 00000000..af15de04
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/links/links.go
@@ -0,0 +1,141 @@
+package links
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/go-connections/nat"
+)
+
+// Link struct holds information about a
parent/child linked container +type Link struct { + // Parent container IP address + ParentIP string + // Child container IP address + ChildIP string + // Link name + Name string + // Child environments variables + ChildEnvironment []string + // Child exposed ports + Ports []nat.Port +} + +// NewLink initializes a new Link struct with the provided options. +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { + ports[i] = p + i++ + } + + return &Link{ + Name: name, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, + Ports: ports, + } +} + +// ToEnv creates a string's slice containing child container informations in +// the form of environment variables which will be later exported on container +// startup. +func (l *Link) ToEnv() []string { + env := []string{} + + _, n := path.Split(l.Name) + alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + + if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 
== len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go new file mode 100644 index 00000000..44ab3cbc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list.go @@ -0,0 +1,515 @@ +package daemon + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "since": true, + "volume": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Container, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. 
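+//
+// For illustration (editor's sketch, not upstream code), a set of list
+// options that exercises this context:
+//
+//    f := filters.NewArgs()
+//    f.Add("status", "exited")
+//    f.Add("exited", "0")
+//    containers, err := daemon.Containers(&types.ContainerListOptions{All: true, Filter: f})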
+type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[image.ID]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + + // FIXME Remove this for 1.12 as --since and --before are deprecated + // beforeContainer is a filter to ignore containers that appear before the one given + beforeContainer *container.Container + // sinceContainer is a filter to stop the filtering when the iterator arrive to the given container + sinceContainer *container.Container + + // beforeFilter is a filter to ignore containers that appear before the one given + // this is used for --filter=before= and --before=, the latter is deprecated. + beforeFilter *container.Container + // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container + // this is used for --filter=since= and --since=, the latter is deprecated. + sinceFilter *container.Container + // ContainerListOptions is the filters set by the user + *types.ContainerListOptions +} + +// Containers returns the list of containers to show given the user's filtering. +func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(config, daemon.transformContainer) +} + +// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. +func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + containers := []*types.Container{} + + ctx, err := daemon.foldFilter(config) + if err != nil { + return nil, err + } + + for _, container := range daemon.List() { + t, err := daemon.reducePsContainer(container, ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { + container.Lock() + defer container.Unlock() + + // filter containers to return + action := includeContainerInList(container, ctx) + switch action { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + return reducer(container, ctx) +} + +// foldFilter generates the container filter based on the user's filtering options. 
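+//
+// Editor's sketch of how individual filter keys map onto the context built
+// below (values are hypothetical):
+//
+//    f := filters.NewArgs()
+//    f.Add("exited", "137")    // parsed with strconv.Atoi into ctx.exitAllowed
+//    f.Add("status", "exited") // a valid state string forces config.All = true
+//    f.Add("before", "web-1")  // resolved via GetContainer into ctx.beforeFilter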
+func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { + psFilters := config.Filter + + if err := psFilters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + + var filtExited []int + + err := psFilters.WalkValues("exited", func(value string) error { + code, err := strconv.Atoi(value) + if err != nil { + return err + } + filtExited = append(filtExited, code) + return nil + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("status", func(value string) error { + if !container.IsValidStateString(value) { + return fmt.Errorf("Unrecognised filter value for status: %s", value) + } + + config.All = true + return nil + }) + if err != nil { + return nil, err + } + + var beforeContFilter, sinceContFilter *container.Container + // FIXME remove this for 1.12 as --since and --before are deprecated + var beforeContainer, sinceContainer *container.Container + + err = psFilters.WalkValues("before", func(value string) error { + beforeContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("since", func(value string) error { + sinceContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + imagesFilter := map[image.ID]bool{} + var ancestorFilter bool + if psFilters.Include("ancestor") { + ancestorFilter = true + psFilters.WalkValues("ancestor", func(ancestor string) error { + id, err := daemon.GetImageID(ancestor) + if err != nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + return nil + } + if imagesFilter[id] { + // Already seen this ancestor, skip it + return nil + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + return nil + }) + } + + // FIXME remove this for 1.12 as --since and --before are deprecated + if config.Before != "" { + beforeContainer, err = daemon.GetContainer(config.Before) + if err != nil { + return nil, err + } + } + + // FIXME remove this for 1.12 as --since and --before are deprecated + if config.Since != "" { + sinceContainer, err = daemon.GetContainer(config.Since) + if err != nil { + return nil, err + } + } + + return &listContext{ + filters: psFilters, + ancestorFilter: ancestorFilter, + images: imagesFilter, + exitAllowed: filtExited, + beforeContainer: beforeContainer, + sinceContainer: sinceContainer, + beforeFilter: beforeContFilter, + sinceFilter: sinceContFilter, + ContainerListOptions: config, + names: daemon.nameIndex.GetAll(), + }, nil +} + +// includeContainerInList decides whether a container should be included in the output or not based in the filter. +// It also decides if the iteration should be stopped or not. +func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { + // Do not include container if it's in the list before the filter container. + // Set the filter container to nil to include the rest of containers after this one. 
+	if ctx.beforeFilter != nil {
+		if container.ID == ctx.beforeFilter.ID {
+			ctx.beforeFilter = nil
+		}
+		return excludeContainer
+	}
+
+	// Stop iteration when the container arrives at the filter container
+	if ctx.sinceFilter != nil {
+		if container.ID == ctx.sinceFilter.ID {
+			return stopIteration
+		}
+	}
+
+	// Do not include container if it's stopped and we're not filtering
+	// FIXME remove the ctx.beforeContainer and ctx.sinceContainer part of the condition for 1.12 as --since and --before are deprecated
+	if !container.Running && !ctx.All && ctx.Limit <= 0 && ctx.beforeContainer == nil && ctx.sinceContainer == nil {
+		return excludeContainer
+	}
+
+	// Do not include container if the name doesn't match
+	if !ctx.filters.Match("name", container.Name) {
+		return excludeContainer
+	}
+
+	// Do not include container if the id doesn't match
+	if !ctx.filters.Match("id", container.ID) {
+		return excludeContainer
+	}
+
+	// Do not include container if any of the labels don't match
+	if !ctx.filters.MatchKVList("label", container.Config.Labels) {
+		return excludeContainer
+	}
+
+	// Do not include container if isolation doesn't match
+	if excludeContainer == excludeByIsolation(container, ctx) {
+		return excludeContainer
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if ctx.beforeContainer != nil {
+		if container.ID == ctx.beforeContainer.ID {
+			ctx.beforeContainer = nil
+		}
+		return excludeContainer
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if ctx.sinceContainer != nil {
+		if container.ID == ctx.sinceContainer.ID {
+			return stopIteration
+		}
+	}
+
+	// Stop iteration when the index is over the limit
+	if ctx.Limit > 0 && ctx.idx == ctx.Limit {
+		return stopIteration
+	}
+
+	// Do not include container if its exit code is not in the filter
+	if len(ctx.exitAllowed) > 0 {
+		shouldSkip := true
+		for _, code := range ctx.exitAllowed {
+			if code == container.ExitCode && !container.Running {
+				shouldSkip = false
+				break
+			}
+		}
+		if shouldSkip {
+			return excludeContainer
+		}
+	}
+
+	// Do not include container if its status doesn't match the filter
+	if !ctx.filters.Match("status", container.State.StateString()) {
+		return excludeContainer
+	}
+
+	if ctx.filters.Include("volume") {
+		volumesByName := make(map[string]*volume.MountPoint)
+		for _, m := range container.MountPoints {
+			if m.Name != "" {
+				volumesByName[m.Name] = m
+			} else {
+				volumesByName[m.Source] = m
+			}
+		}
+
+		volumeExist := fmt.Errorf("volume mounted in container")
+		err := ctx.filters.WalkValues("volume", func(value string) error {
+			if _, exist := container.MountPoints[value]; exist {
+				return volumeExist
+			}
+			if _, exist := volumesByName[value]; exist {
+				return volumeExist
+			}
+			return nil
+		})
+		if err != volumeExist {
+			return excludeContainer
+		}
+	}
+
+	if ctx.ancestorFilter {
+		if len(ctx.images) == 0 {
+			return excludeContainer
+		}
+		if !ctx.images[container.ImageID] {
+			return excludeContainer
+		}
+	}
+
+	return includeContainer
+}
+
+// transformContainer generates the container type expected by the docker ps command.
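+//
+// Editor's note, for illustration: arguments containing spaces are re-quoted
+// when building the Command field, so Path "echo" with Args ["hello world"]
+// becomes:
+//
+//    newC.Command == `echo 'hello world'`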
+func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID.String(), + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } + } + newC.Image = image + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.State = container.State.StateString() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + // copy networks to avoid races + networks := make(map[string]*networktypes.EndpointSettings) + for name, network := range container.NetworkSettings.Networks { + if network == nil { + continue + } + networks[name] = &networktypes.EndpointSettings{ + EndpointID: network.EndpointID, + Gateway: network.Gateway, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + IPv6Gateway: network.IPv6Gateway, + GlobalIPv6Address: network.GlobalIPv6Address, + GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, + MacAddress: network.MacAddress, + } + if network.IPAMConfig != nil { + networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: network.IPAMConfig.IPv4Address, + IPv6Address: network.IPAMConfig.IPv6Address, + } + } + } + newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + return nil, err + } + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + PublicPort: h, + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(container) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + newC.Labels = container.Config.Labels + newC.Mounts = addMountPoints(container) + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. 
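+//
+// Editor's sketch (not upstream code): listing only dangling volumes by
+// encoding the filter with the same filters package used below:
+//
+//    f := filters.NewArgs()
+//    f.Add("dangling", "true")
+//    param, _ := filters.ToParam(f)
+//    vols, warnings, err := daemon.Volumes(param)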
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + danglingOnly = false + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + if volFilters.Include("dangling") { + if volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !volFilters.ExactMatch("dangling", "false") && !volFilters.ExactMatch("dangling", "0") { + return nil, nil, fmt.Errorf("Invalid filter 'dangling=%s'", volFilters.Get("dangling")) + } + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + if volFilters.Include("dangling") { + volumes = daemon.volumes.FilterByUsed(volumes, !danglingOnly) + } + for _, v := range volumes { + volumesOut = append(volumesOut, volumeToAPIType(v)) + } + return volumesOut, warnings, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go new file mode 100644 index 00000000..8dccbe4e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go new file mode 100644 index 00000000..568770e0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go @@ -0,0 +1,8 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/docker/docker/daemon/logger/context.go b/vendor/github.com/docker/docker/daemon/logger/context.go new file mode 100644 index 00000000..eb54c311 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/context.go @@ -0,0 +1,112 @@ +package logger + +import ( + "fmt" + "os" + "strings" + "time" +) + +// Context provides enough information for a logging driver to do its function. +type Context struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. 
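+//
+// Editor's sketch with hypothetical values: given Config["labels"] =
+// "com.example.team" and a container label com.example.team=infra,
+//
+//    extra := ctx.ExtraAttributes(strings.ToUpper)
+//    // extra == map[string]string{"COM.EXAMPLE.TEAM": "infra"}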
+func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string { + extra := make(map[string]string) + labels, ok := ctx.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := ctx.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + env, ok := ctx.Config["env"] + if ok && len(env) > 0 { + envMapping := make(map[string]string) + for _, e := range ctx.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + return extra +} + +// Hostname returns the hostname from the underlying OS. +func (ctx *Context) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (ctx *Context) Command() string { + terms := []string{ctx.ContainerEntrypoint} + for _, arg := range ctx.ContainerArgs { + terms = append(terms, arg) + } + command := strings.Join(terms, " ") + return command +} + +// ID Returns the Container ID shortened to 12 characters. +func (ctx *Context) ID() string { + return ctx.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (ctx *Context) FullID() string { + return ctx.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (ctx *Context) Name() string { + return ctx.ContainerName[1:] +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (ctx *Context) ImageID() string { + return ctx.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID. +func (ctx *Context) ImageFullID() string { + return ctx.ContainerImageID +} + +// ImageName is an alias of ContainerImageName +func (ctx *Context) ImageName() string { + return ctx.ContainerImageName +} diff --git a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go new file mode 100644 index 00000000..436c0a8f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/copier.go @@ -0,0 +1,86 @@ +package logger + +import ( + "bufio" + "bytes" + "io" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +// Copier can copy logs from specified sources to Logger and attach +// ContainerID and Timestamp. 
+// Writes are concurrent, so you need implement some sync in your logger +type Copier struct { + // cid is the container id for which we are copying logs + cid string + // srcs is map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(cid string, srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + cid: cid, + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts logs copying +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + reader := bufio.NewReader(src) + + for { + select { + case <-c.closed: + return + default: + line, err := reader.ReadBytes('\n') + line = bytes.TrimSuffix(line, []byte{'\n'}) + + // ReadBytes can return full or partial output even when it failed. + // e.g. it can return a full entry and EOF. + if err == nil || len(line) > 0 { + if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr) + } + } + + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + } + return + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + select { + case <-c.closed: + default: + close(c.closed) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go new file mode 100644 index 00000000..5ec0f673 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -0,0 +1,89 @@ +package logger + +import ( + "fmt" + "sync" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Context) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. 
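+//
+// Editor's sketch of a minimal validator (a hypothetical driver option, not
+// an upstream one):
+//
+//    var validator LogOptValidator = func(cfg map[string]string) error {
+//        for key := range cfg {
+//            if key != "example-opt" {
+//                return fmt.Errorf("unknown log opt '%s'", key)
+//            }
+//        }
+//        return nil
+//    }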
+type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.registry[name]; ok { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + lf.registry[name] = c + return nil +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if !ok { + return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + return c, nil +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. +func ValidateLogOpts(name string, cfg map[string]string) error { + l := factory.getLogOptValidator(name) + if l != nil { + return l(cfg) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 00000000..9faa4e02 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,147 @@ +// Package jsonfilelog provides the default Logger implementation for +// Docker logging. This logger logs to files on the host server in the +// JSON format. +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/go-units" +) + +// Name is the name of the file that the jsonlogger logs to. +const Name = "json-file" + +// JSONFileLogger is Logger implementation for default Docker logging. 
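+//
+// Editor's sketch (hypothetical log path): building the driver through the
+// factory with rotation options, as the daemon does:
+//
+//    creator, _ := logger.GetLogDriver(Name)
+//    l, err := creator(logger.Context{
+//        LogPath: "/var/lib/docker/containers/<id>/<id>-json.log",
+//        Config:  map[string]string{"max-size": "10m", "max-file": "3"},
+//    })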
+type JSONFileLogger struct { + buf *bytes.Buffer + writer *loggerutils.RotateFileWriter + mu sync.Mutex + readers map[*logger.LogWatcher]struct{} // stores the active log followers + extra []byte // json-encoded extra attributes +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. +func New(ctx logger.Context) (logger.Logger, error) { + var capval int64 = -1 + if capacity, ok := ctx.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + } + var maxFiles = 1 + if maxFileString, ok := ctx.Config["max-file"]; ok { + var err error + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-file cannot be less than 1") + } + } + + writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) + if err != nil { + return nil, err + } + + var extra []byte + if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { + var err error + extra, err = json.Marshal(attrs) + if err != nil { + return nil, err + } + } + + return &JSONFileLogger{ + buf: bytes.NewBuffer(nil), + writer: writer, + readers: make(map[*logger.LogWatcher]struct{}), + extra: extra, + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. +func (l *JSONFileLogger) Log(msg *logger.Message) error { + timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) + if err != nil { + return err + } + l.mu.Lock() + err = (&jsonlog.JSONLogs{ + Log: append(msg.Line, '\n'), + Stream: msg.Source, + Created: timestamp, + RawAttrs: l.extra, + }).MarshalJSONBuf(l.buf) + if err != nil { + l.mu.Unlock() + return err + } + + l.buf.WriteByte('\n') + _, err = l.writer.Write(l.buf.Bytes()) + l.buf.Reset() + l.mu.Unlock() + + return err +} + +// ValidateLogOpt looks for json specific log options max-file & max-size. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "labels": + case "env": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// LogPath returns the location the given json logger logs to. +func (l *JSONFileLogger) LogPath() string { + return l.writer.LogPath() +} + +// Close closes underlying file and signals all readers to stop. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + err := l.writer.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. 
+func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go new file mode 100644 index 00000000..0c8fb5e5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,235 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + return + } + + if config.Tail != 0 { + tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) 
+ tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow { + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + l.mu.Lock() + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + notifyRotate := l.writer.NotifyRotate() + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() + + l.writer.NotifyRotateEvict(notifyRotate) +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + fileWatcher, err := filenotify.New() + if err != nil { + logWatcher.Err <- err + } + defer func() { + f.Close() + fileWatcher.Close() + }() + name := f.Name() + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + logrus.Debugf("error watching log file for modifications: %v", err) + logWatcher.Err <- err + return + } + } + + var retries int + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + continue + } + + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. 
+				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
+					reader := io.MultiReader(dec.Buffered(), f)
+					dec = json.NewDecoder(reader)
+					retries++
+					continue
+				}
+
+				return
+			}
+
+			select {
+			case <-fileWatcher.Events():
+				dec = json.NewDecoder(f)
+				continue
+			case <-fileWatcher.Errors():
+				logWatcher.Err <- err
+				return
+			case <-logWatcher.WatchClose():
+				fileWatcher.Remove(name)
+				return
+			case <-notifyRotate:
+				f.Close()
+				fileWatcher.Remove(name)
+
+				// retry when the file doesn't exist
+				for retries := 0; retries <= 5; retries++ {
+					f, err = os.Open(name)
+					if err == nil || !os.IsNotExist(err) {
+						break
+					}
+				}
+
+				if err = fileWatcher.Add(name); err != nil {
+					logWatcher.Err <- err
+					return
+				}
+				if err != nil {
+					logWatcher.Err <- err
+					return
+				}
+
+				dec = json.NewDecoder(f)
+				continue
+			}
+		}
+
+		retries = 0 // reset retries since we've succeeded
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		select {
+		case logWatcher.Msg <- msg:
+		case <-logWatcher.WatchClose():
+			logWatcher.Msg <- msg
+			for {
+				msg, err := decodeLogLine(dec, l)
+				if err != nil {
+					return
+				}
+				if !since.IsZero() && msg.Timestamp.Before(since) {
+					continue
+				}
+				logWatcher.Msg <- msg
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go
new file mode 100644
index 00000000..cf8d571f
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/logger.go
@@ -0,0 +1,87 @@
+// Package logger defines interfaces that logger drivers implement to
+// log messages.
+//
+// The other half of a logger driver is the implementation of the
+// factory, which holds the contextual instance information that
+// allows multiple loggers of the same type to perform different
+// actions, such as logging to different locations.
+package logger
+
+import (
+	"errors"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonlog"
+)
+
+// ErrReadLogsNotSupported is returned when the logger does not support reading logs.
+var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading")
+
+const (
+	// TimeFormat is the time format used for timestamps sent to log readers.
+	TimeFormat = jsonlog.RFC3339NanoFixed
+	logWatcherBufferSize = 4096
+)
+
+// Message is the data structure that represents a log record from some container.
+type Message struct {
+	ContainerID string
+	Line []byte
+	Source string
+	Timestamp time.Time
+}
+
+// Logger is the interface for docker logging drivers.
+type Logger interface {
+	Log(*Message) error
+	Name() string
+	Close() error
+}
+
+// ReadConfig is the configuration passed into ReadLogs.
+type ReadConfig struct {
+	Since time.Time
+	Tail int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading.
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface.
+type LogWatcher struct {
+	// For sending log messages to a reader.
+	Msg chan *Message
+	// For sending error messages that occur while reading logs.
+	Err chan error
+	closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+	return &LogWatcher{
+		Msg: make(chan *Message, logWatcherBufferSize),
+		Err: make(chan error, 1),
+		closeNotifier: make(chan struct{}),
+	}
+}
+
+// Close notifies the underlying log reader to stop.
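+//
+// Editor's sketch: a consumer draining the watcher and detaching when done:
+//
+//    w := reader.ReadLogs(ReadConfig{Tail: 10, Follow: true})
+//    defer w.Close()
+//    for msg := range w.Msg {
+//        fmt.Printf("%s %s\n", msg.Timestamp.Format(TimeFormat), msg.Line)
+//    }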
+func (w *LogWatcher) Close() { + // only close if not already closed + select { + case <-w.closeNotifier: + default: + close(w.closeNotifier) + } +} + +// WatchClose returns a channel receiver that receives notification +// when the watcher has been closed. This should only be called from +// one goroutine. +func (w *LogWatcher) WatchClose() <-chan struct{} { + return w.closeNotifier +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go new file mode 100644 index 00000000..6653b9c4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go @@ -0,0 +1,46 @@ +package loggerutils + +import ( + "bytes" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/utils/templates" +) + +// ParseLogTag generates a context aware tag for consistency across different +// log drivers based on the context of the running container. +func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { + tagTemplate := lookupTagTemplate(ctx, defaultTemplate) + + tmpl, err := templates.NewParse("log-tag", tagTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &ctx); err != nil { + return "", err + } + + return buf.String(), nil +} + +func lookupTagTemplate(ctx logger.Context, defaultTemplate string) string { + tagTemplate := ctx.Config["tag"] + + deprecatedConfigs := []string{"syslog-tag", "gelf-tag", "fluentd-tag"} + for i := 0; tagTemplate == "" && i < len(deprecatedConfigs); i++ { + cfg := deprecatedConfigs[i] + if ctx.Config[cfg] != "" { + tagTemplate = ctx.Config[cfg] + logrus.Warn(fmt.Sprintf("Using log tag from deprecated log-opt '%s'. Please use: --log-opt tag=\"%s\"", cfg, tagTemplate)) + } + } + + if tagTemplate == "" { + tagTemplate = defaultTemplate + } + + return tagTemplate +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go new file mode 100644 index 00000000..99e0964a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go @@ -0,0 +1,124 @@ +package loggerutils + +import ( + "os" + "strconv" + "sync" + + "github.com/docker/docker/pkg/pubsub" +) + +// RotateFileWriter is Logger implementation for default Docker logging. 
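+//
+// Editor's sketch (hypothetical path): a writer capped at 10 MB that keeps
+// at most three files:
+//
+//    w, err := NewRotateFileWriter("/var/log/container-json.log", 10*1024*1024, 3)
+//    if err == nil {
+//        _, err = w.Write([]byte(`{"log":"hello\n"}` + "\n"))
+//    }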
+type RotateFileWriter struct {
+	f *os.File // store for closing
+	mu sync.Mutex
+	capacity int64 // maximum size of each file
+	currentSize int64 // current size of the latest file
+	maxFiles int // maximum number of files
+	notifyRotate *pubsub.Publisher
+}
+
+// NewRotateFileWriter creates a new RotateFileWriter
+func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) {
+	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := log.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, err
+	}
+
+	return &RotateFileWriter{
+		f: log,
+		capacity: capacity,
+		currentSize: size,
+		maxFiles: maxFiles,
+		notifyRotate: pubsub.NewPublisher(0, 1),
+	}, nil
+}
+
+// Write writes a log message to the file, rotating it first when capacity is reached
+func (w *RotateFileWriter) Write(message []byte) (int, error) {
+	w.mu.Lock()
+	if err := w.checkCapacityAndRotate(); err != nil {
+		w.mu.Unlock()
+		return -1, err
+	}
+
+	n, err := w.f.Write(message)
+	if err == nil {
+		w.currentSize += int64(n)
+	}
+	w.mu.Unlock()
+	return n, err
+}
+
+func (w *RotateFileWriter) checkCapacityAndRotate() error {
+	if w.capacity == -1 {
+		return nil
+	}
+
+	if w.currentSize >= w.capacity {
+		name := w.f.Name()
+		if err := w.f.Close(); err != nil {
+			return err
+		}
+		if err := rotate(name, w.maxFiles); err != nil {
+			return err
+		}
+		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
+		if err != nil {
+			return err
+		}
+		w.f = file
+		w.currentSize = 0
+		w.notifyRotate.Publish(struct{}{})
+	}
+
+	return nil
+}
+
+func rotate(name string, maxFiles int) error {
+	if maxFiles < 2 {
+		return nil
+	}
+	for i := maxFiles - 1; i > 1; i-- {
+		toPath := name + "." + strconv.Itoa(i)
+		fromPath := name + "." + strconv.Itoa(i-1)
+		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// LogPath returns the location the given writer logs to.
+func (w *RotateFileWriter) LogPath() string {
+	return w.f.Name()
+}
+
+// MaxFiles returns the maximum number of files
+func (w *RotateFileWriter) MaxFiles() int {
+	return w.maxFiles
+}
+
+// NotifyRotate returns a new subscriber channel
+func (w *RotateFileWriter) NotifyRotate() chan interface{} {
+	return w.notifyRotate.Subscribe()
+}
+
+// NotifyRotateEvict removes the specified subscriber from receiving any more messages.
+func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) {
+	w.notifyRotate.Evict(sub)
+}
+
+// Close closes underlying file and signals all readers to stop.
+func (w *RotateFileWriter) Close() error {
+	return w.f.Close()
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go
new file mode 100644
index 00000000..99e03278
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go
@@ -0,0 +1,247 @@
+// +build linux
+
+// Package syslog provides the logdriver for forwarding server logs to syslog endpoints.
+package syslog + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" + + syslog "github.com/RackSec/srslog" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + name = "syslog" + secureProto = "tcp+tls" +) + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type syslogger struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// rsyslog uses appname part of syslog message to fill in an %syslogtag% template +// attribute in rsyslog.conf. In order to be backward compatible to rfc3164 +// tag will be also used as an appname +func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// New creates a syslog logger using the configuration passed in on +// the context. Supported context configuration variables are +// syslog-address, syslog-facility, & syslog-tag. 
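+//
+// Editor's sketch (hypothetical endpoint): a context selecting a TCP target,
+// an explicit facility and a templated tag:
+//
+//    l, err := New(logger.Context{Config: map[string]string{
+//        "syslog-address":  "tcp://10.0.0.5:514",
+//        "syslog-facility": "local3",
+//        "tag":             "{{.Name}}",
+//    }})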
+func New(ctx logger.Context) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}") + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(ctx.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(ctx.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"]) + if err != nil { + return nil, err + } + + logTag := path.Base(os.Args[0]) + "/" + tag + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(ctx.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, logTag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, logTag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return s.writer.Err(string(msg.Line)) + } + return s.writer.Info(string(msg.Line)) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix socket validation + if url.Scheme == "unix" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility, & syslog-tag. 
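+//
+// Editor's sketch: malformed values fail fast, before any dialing happens:
+//
+//    err := ValidateLogOpt(map[string]string{"syslog-address": "10.0.0.5:514"})
+//    // err: syslog-address should be in form proto://address, got 10.0.0.5:514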
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "syslog-address": + case "syslog-facility": + case "syslog-tag": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"]); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go new file mode 100644 index 00000000..50cc51b6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package syslog diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go new file mode 100644 index 00000000..40c47a6e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logs.go @@ -0,0 +1,154 @@ +package daemon + +import ( + "fmt" + "io" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" + containertypes "github.com/docker/engine-api/types/container" + timetypes "github.com/docker/engine-api/types/time" +) + +// ContainerLogs hooks up a container's stdout and stderr streams +// configured with the given struct. 
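+//
+// Editor's sketch (hypothetical container name): tailing the last 100 lines
+// with timestamps and then following, using the fields read below:
+//
+//    cfg.ShowStdout = true
+//    cfg.Timestamps = true
+//    cfg.Tail = "100"
+//    cfg.Follow = true
+//    started := make(chan struct{}) // closed once the stream is set up
+//    err := daemon.ContainerLogs("web-1", cfg, started)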
+func (daemon *Daemon) ContainerLogs(containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + if !(config.ShowStdout || config.ShowStderr) { + return fmt.Errorf("You must choose at least one stream") + } + + cLog, err := daemon.getLogger(container) + if err != nil { + return err + } + logReader, ok := cLog.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + + follow := config.Follow && container.IsRunning() + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + logrus.Debug("logs: begin stream") + + var since time.Time + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return err + } + since = time.Unix(s, n) + } + readConfig := logger.ReadConfig{ + Since: since, + Tail: tailLines, + Follow: follow, + } + logs := logReader.ReadLogs(readConfig) + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + var outStream io.Writer = wf + errStream := outStream + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + select { + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + return nil + case <-config.Stop: + logs.Close() + return nil + case msg, ok := <-logs.Msg: + if !ok { + logrus.Debugf("logs: end stream") + logs.Close() + return nil + } + logLine := msg.Line + if config.Timestamps { + logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) + } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } + } +} + +func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { + if container.LogDriver != nil && container.IsRunning() { + return container.LogDriver, nil + } + cfg := daemon.getLogConfig(container.HostConfig.LogConfig) + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return nil, err + } + return container.StartLogger(cfg) +} + +// StartLogging initializes and starts the container logging stream. +func (daemon *Daemon) StartLogging(container *container.Container) error { + cfg := daemon.getLogConfig(container.HostConfig.LogConfig) + if cfg.Type == "none" { + return nil // do not start logging routines + } + + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return err + } + l, err := container.StartLogger(cfg) + if err != nil { + return fmt.Errorf("Failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// getLogConfig returns the log configuration for the container. 
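+// A sketch of the defaulting rule implemented below (illustrative option): log
+// options without an explicit driver name fall back to json-file, while a fully
+// empty config inherits the daemon-wide default:
+//
+//	cfg := daemon.getLogConfig(containertypes.LogConfig{
+//		Config: map[string]string{"max-size": "10m"}, // hypothetical json-file option
+//	})
+//	// cfg.Type == jsonfilelog.Name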
+func (daemon *Daemon) getLogConfig(cfg containertypes.LogConfig) containertypes.LogConfig { + if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured + if cfg.Type == "" { + cfg.Type = jsonfilelog.Name + } + return cfg + } + + // Use daemon's default log config for containers + return daemon.defaultLogConfig +} diff --git a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go new file mode 100644 index 00000000..f9f7def9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor.go @@ -0,0 +1,144 @@ +package daemon + +import ( + "errors" + "fmt" + "io" + "runtime" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/runconfig" +) + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + c.Lock() + defer c.Unlock() + c.Wait() + c.Reset(false) + c.SetStopped(platformConstructExitStatus(e)) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + // FIXME: here is race condition between two RUN instructions in Dockerfile + // because they share same runconfig and change image. Must be fixed + // in builder/builder.go + return c.ToDisk() + case libcontainerd.StateRestart: + c.Lock() + defer c.Unlock() + c.Reset(false) + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + return c.ToDisk() + case libcontainerd.StateExitProcess: + c.Lock() + defer c.Unlock() + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. + c.ExecCommands.Delete(execConfig.ID) + } else { + logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + } + case libcontainerd.StateStart, libcontainerd.StateRestore: + c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) + c.HasBeenManuallyStopped = false + if err := c.ToDisk(); err != nil { + c.Reset(false) + return err + } + daemon.LogContainerEvent(c, "start") + case libcontainerd.StatePause: + c.Paused = true + daemon.LogContainerEvent(c, "pause") + case libcontainerd.StateResume: + c.Paused = false + daemon.LogContainerEvent(c, "unpause") + } + + return nil +} + +// AttachStreams is called by libcontainerd to connect the stdio. 
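+// The function below wires each pipe through the container's StreamConfig, using
+// its WaitGroup semantics (s.Add/s.Done) so readers can wait until every copy
+// has drained. A minimal standalone sketch of the same fan-out pattern
+// (hypothetical containerStdout reader):
+//
+//	var wg sync.WaitGroup
+//	copyStream := func(w io.Writer, r io.Reader) {
+//		wg.Add(1)
+//		go func() { defer wg.Done(); io.Copy(w, r) }()
+//	}
+//	copyStream(os.Stdout, containerStdout)
+//	wg.Wait() // all output flushed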
+func (daemon *Daemon) AttachStreams(id string, iop libcontainerd.IOPipe) error { + var s *runconfig.StreamConfig + c := daemon.containers.Get(id) + if c == nil { + ec, err := daemon.getExecConfig(id) + if err != nil { + return fmt.Errorf("no such exec/container: %s", id) + } + s = ec.StreamConfig + } else { + s = c.StreamConfig + if err := daemon.StartLogging(c); err != nil { + c.Reset(false) + return err + } + } + + if stdin := s.Stdin(); stdin != nil { + if iop.Stdin != nil { + go func() { + io.Copy(iop.Stdin, stdin) + iop.Stdin.Close() + }() + } + } else { + if c != nil && !c.Config.Tty { + // tty is enabled, so dont close containerd's iopipe stdin. + if iop.Stdin != nil { + iop.Stdin.Close() + } + } + } + + copy := func(w io.Writer, r io.Reader) { + s.Add(1) + go func() { + if _, err := io.Copy(w, r); err != nil { + logrus.Errorf("%v stream copy error: %v", id, err) + } + s.Done() + }() + } + + if iop.Stdout != nil { + copy(s.Stdout(), iop.Stdout) + } + if iop.Stderr != nil { + copy(s.Stderr(), iop.Stderr) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor_linux.go b/vendor/github.com/docker/docker/daemon/monitor_linux.go new file mode 100644 index 00000000..df8b6c5d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor_linux.go @@ -0,0 +1,14 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + OOMKilled: e.OOMKilled, + } +} diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go new file mode 100644 index 00000000..d4f24b28 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/mounts.go @@ -0,0 +1,48 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/container" + volumestore "github.com/docker/docker/volume/store" +) + +func (daemon *Daemon) prepareMountPoints(container *container.Container) error { + for _, config := range container.MountPoints { + if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { + var rmErrors []string + for _, m := range container.MountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + if rm { + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v :/foo` + if m.Named { + continue + } + err := daemon.volumes.Remove(m.Volume) + // Ignore volume in use errors because having this + // volume being referenced by other container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // where there is another container using the volume. 
+ if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + } + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go new file mode 100644 index 00000000..823bec26 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -0,0 +1,22 @@ +package network + +import ( + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*networktypes.EndpointSettings + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool +} diff --git a/vendor/github.com/docker/docker/daemon/network_operations.go b/vendor/github.com/docker/docker/daemon/network_operations.go new file mode 100644 index 00000000..70097298 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network_operations.go @@ -0,0 +1,77 @@ +package daemon + +import ( + "fmt" + "os" + + "github.com/docker/docker/container" + derr "github.com/docker/docker/errors" + networktypes "github.com/docker/engine-api/types/network" +) + +func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error { + return nil +} + +func (daemon *Daemon) updateNetwork(container *container.Container) error { + return nil +} + +func (daemon *Daemon) allocateNetwork(container *container.Container) error { + return nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + 
return container.BuildHostnameFile() +} diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go new file mode 100644 index 00000000..47d4b89f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -0,0 +1,686 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/opencontainers/specs/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioReadBpsDevices(r) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioWriteBpsDevices(r) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioReadIOpsDevices(r) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioWriteIOpsDevices(r) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes := getCPUResources(r) + blkioWeight := r.BlkioWeight + + specResources := &specs.Resources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.BlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.Pids{ + Limit: &r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed and created within the container. + var devs []specs.Device + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, specDevice(d)) + } + rwm := "rwm" + devPermissions = []specs.DeviceCgroup{ + { + Allow: true, + Access: &rwm, + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := getDevicesFromPath(deviceMapping) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...) 
+ s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.Rlimit + + ulimits := c.HostConfig.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + + for _, ul := range ulimits { + rlimits = append(rlimits, specs.Rlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) + var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.Namespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities = caplist + return nil +} + +func delNamespace(s *specs.Spec, nsType specs.NamespaceType) { + idx := -1 + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + idx = i + } + } + if idx >= 0 { + s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) 
+ } +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap, gidMap := daemon.GetUIDGIDMaps() + if uidMap != nil { + userNS = true + ns := specs.Namespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + s.Linux.GIDMappings = specMapping(gidMap) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.Namespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = fmt.Sprintf("/proc/%d/ns/net", os.Getpid()) + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.Namespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + delNamespace(s, specs.NamespaceType("ipc")) + } else { + ns := specs.Namespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsHost() { + delNamespace(s, specs.NamespaceType("pid")) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + delNamespace(s, specs.NamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s []idtools.IDMap) []specs.IDMapping { + var ids []specs.IDMapping + for _, item := range s { + ids = append(ids, specs.IDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of directory passed in as argument. Also return +// optional fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure mount point on which path is mounted, is shared. 
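+// The helpers below inspect the mountinfo "optional fields" column (see proc(5)),
+// where a shared mount carries a "shared:N" peer-group tag and a slave mount a
+// "master:N" tag. A sketch against a hypothetical optional-fields string:
+//
+//	optional := "shared:1 master:2"
+//	shared := false
+//	for _, opt := range strings.Split(optional, " ") {
+//		if strings.HasPrefix(opt, "shared:") {
+//			shared = true
+//			break
+//		}
+//	}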
+func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overriden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) + } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + opt := []string{"noexec", "nosuid", "nodev", volume.DefaultPropagationMode} + if m.Data != "" { + opt = append(opt, strings.Split(m.Data, ",")...) + } else { + opt = append(opt, "size=65536k") + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: opt}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. 
+ pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) 
+ s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + var cgroupsPath string + scopePrefix := "docker" + parent := "/docker" + useSystemd := UsingSystemd(daemon.configStore) + if useSystemd { + parent = "system.slice" + } + + if c.HostConfig.CgroupParent != "" { + parent = c.HostConfig.CgroupParent + } else if daemon.configStore.CgroupParent != "" { + parent = daemon.configStore.CgroupParent + } + + if useSystemd { + cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID + logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + } else { + cgroupsPath = filepath.Join(parent, c.ID) + } + s.Linux.CgroupsPath = &cgroupsPath + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("linux runtime spec resources: %v", err) + } + s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj + if err := setDevices(&s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec devices: %v", err) + } + if err := setRlimits(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) + } + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("linux spec user: %v", err) + } + if err := setNamespaces(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux spec namespaces: %v", err) + } + if err := setCapabilities(&s, c); err != nil { + return nil, fmt.Errorf("linux spec capabilities: %v", err) + } + if err := setSeccomp(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux seccomp: %v", err) + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + mounts = append(mounts, c.IpcMounts()...) + mounts = append(mounts, c.TmpfsMounts()...) 
+ if err := setMounts(daemon, &s, c, mounts); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + //for _, ns := range s.Linux.Namespaces { + // if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + // target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + // if err != nil { + // return nil, err + // } + + // s.Hooks = specs.Hooks{ + // Prestart: []specs.Hook{{ + // Path: target, // FIXME: cross-platform + // Args: []string{"libnetwork-setkey", c.ID}, + // }}, + // } + // } + //} + + if apparmor.IsEnabled() { + appArmorProfile := "docker-default" + if len(c.AppArmorProfile) > 0 { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*libcontainerd.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go new file mode 100644 index 00000000..dbfafbc5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. +func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot Pause the container which is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot Pause the container which is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go new file mode 100644 index 00000000..7989ad23 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/rename.go @@ -0,0 +1,65 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. 
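+// Illustrative call (hypothetical names); on any failure after the new name is
+// reserved, the deferred handlers below restore and re-reserve the old name:
+//
+//	if err := daemon.ContainerRename("old_web", "web"); err != nil {
+//		// "web" was already reserved, or persisting the rename failed
+//	}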
+func (daemon *Daemon) ContainerRename(oldName, newName string) error { + if oldName == "" || newName == "" { + return fmt.Errorf("Neither old nor new names may be empty") + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + oldName = container.Name + + container.Lock() + defer container.Unlock() + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + container.Name = newName + + defer func() { + if err != nil { + container.Name = oldName + daemon.reserveName(container.ID, oldName) + daemon.releaseName(newName) + } + }() + + daemon.releaseName(oldName) + if err = container.ToDisk(); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + if e := container.ToDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go new file mode 100644 index 00000000..74735385 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. +func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go new file mode 100644 index 00000000..3779116c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/restart.go @@ -0,0 +1,48 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. 
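+// Illustrative calls (hypothetical container name), showing the timeout contract
+// described above:
+//
+//	daemon.ContainerRestart("web", 10) // up to 10s of graceful stop, then SIGKILL
+//	daemon.ContainerRestart("web", -1) // wait indefinitely for a graceful stop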
+func (daemon *Daemon) ContainerRestart(name string, seconds int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if err := daemon.containerRestart(container, seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. +func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if err := daemon.containerStop(container, seconds); err != nil { + return err + } + + if err := daemon.containerStart(container); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go new file mode 100644 index 00000000..620eee29 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go @@ -0,0 +1,12 @@ +// +build !seccomp,!windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/opencontainers/specs/specs-go" +) + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/vendor/github.com/docker/docker/daemon/seccomp_linux.go new file mode 100644 index 00000000..659a15de --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_linux.go @@ -0,0 +1,46 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/specs/specs-go" +) + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.Seccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile() + if err != nil { + return err + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_linux.go b/vendor/github.com/docker/docker/daemon/selinux_linux.go new file mode 100644 index 00000000..83a34471 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/runc/libcontainer/selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() 
+} diff --git a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go new file mode 100644 index 00000000..25a56ad1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go new file mode 100644 index 00000000..1b34f426 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/start.go @@ -0,0 +1,185 @@ +package daemon + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/errors" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/runconfig" + containertypes "github.com/docker/engine-api/types/container" +) + +// ContainerStart starts a container. +func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if container.IsPaused() { + return fmt.Errorf("Cannot start a paused container, try unpause instead.") + } + + if container.IsRunning() { + err := fmt.Errorf("Container already started") + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + + // Windows does not have the backwards compatibility issue here. + if runtime.GOOS != "windows" { + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. + if hostConfig != nil { + logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and will be removed in Docker 1.12") + oldNetworkMode := container.HostConfig.NetworkMode + if err := daemon.setSecurityOptions(container, hostConfig); err != nil { + return err + } + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return err + } + newNetworkMode := container.HostConfig.NetworkMode + if string(oldNetworkMode) != string(newNetworkMode) { + // if user has change the network mode on starting, clean up the + // old networks. It is a deprecated feature and will be removed in Docker 1.12 + container.NetworkSettings.Networks = nil + if err := container.ToDisk(); err != nil { + return err + } + } + container.InitDNSHostConfig() + } + } else { + if hostConfig != nil { + return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + } + } + + // check if hostConfig is in line with the current system settings. + // It may happen cgroups are umounted or the like. + if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil { + return err + } + // Adapt for old containers in case we have updates in this function and + // old containers never have chance to call the new function in create stage. 
+ if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { + return err + } + + return daemon.containerStart(container) +} + +// Start starts a container +func (daemon *Daemon) Start(container *container.Container) error { + return daemon.containerStart(container) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running. +func (daemon *Daemon) containerStart(container *container.Container) (err error) { + container.Lock() + defer container.Unlock() + + if container.Running { + return nil + } + + if container.RemovalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.SetError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode == 0 { + container.ExitCode = 128 + } + container.ToDisk() + daemon.Cleanup(container) + attributes := map[string]string{ + "exitCode": fmt.Sprintf("%d", container.ExitCode), + } + daemon.LogContainerEventWithAttributes(container, "die", attributes) + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + if err := daemon.containerd.Create(container.ID, *spec, libcontainerd.WithRestartManager(container.RestartManager(true))); err != nil { + // if we receive an internal error from the initial start of a container then lets + // return it instead of entering the restart loop + // set to 127 for container cmd not found/does not exist) + if strings.Contains(err.Error(), "executable file not found") || + strings.Contains(err.Error(), "no such file or directory") || + strings.Contains(err.Error(), "system cannot find the file specified") { + container.ExitCode = 127 + err = fmt.Errorf("Container command '%s' not found or does not exist.", container.Path) + } + // set to 126 for container cmd can't be invoked errors + if strings.Contains(err.Error(), syscall.EACCES.Error()) { + container.ExitCode = 126 + err = fmt.Errorf("Container command '%s' could not be invoked.", container.Path) + } + + container.Reset(false) + + // start event is logged even on error + daemon.LogContainerEvent(container, "start") + return err + } + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. 
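+// Cleanup is reached from the "die" paths above: StateChanged on container exit
+// and the deferred error handler in containerStart. A condensed sketch of that
+// deferred pattern (illustrative, mirroring the code above):
+//
+//	defer func() {
+//		if err != nil {
+//			container.SetError(err)
+//			daemon.Cleanup(container) // release network, mounts and exec commands
+//		}
+//	}()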
+func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go new file mode 100644 index 00000000..cb3478cc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats.go @@ -0,0 +1,121 @@ +package daemon + +import ( + "encoding/json" + "errors" + "runtime" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/versions/v1p20" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "windows" { + return errors.New("Windows does not support stats") + } + // Remote API version (used for backwards compatibility) + apiVersion := version.Version(config.Version) + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is not running and requires no stream, return an empty stats. 
+ if !container.IsRunning() && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.PreCPUStats = preCPUStats + // ss.MemoryStats.Limit = uint64(update.MemoryLimit) + preCPUStats = ss.CPUStats + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if apiVersion.LessThan("1.21") { + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-config.Stop: + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go new file mode 100644 index 00000000..1f016322 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go @@ -0,0 +1,189 @@ +// +build !windows + +package daemon + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" + sysinfo "github.com/docker/docker/pkg/system" + "github.com/docker/engine-api/types" + "github.com/opencontainers/runc/libcontainer/system" +) + +type statsSupervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + +// newStatsCollector returns a new statsCollector that collections +// network and cgroup stats for a registered container at the specified +// interval. The collector allows non-running containers to be added +// and will start processing stats when they are started. 
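+// Illustrative wiring (hypothetical interval): the daemon owns a single collector,
+// and each stats request subscribes to the per-container publisher:
+//
+//	collector := daemon.newStatsCollector(1 * time.Second)
+//	updates := collector.collect(container) // receives types.StatsJSON values
+//	defer collector.unsubscribe(container, updates)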
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + s := &statsCollector{ + interval: interval, + supervisor: daemon, + publishers: make(map[*container.Container]*pubsub.Publisher), + clockTicksPerSecond: uint64(system.GetClockTicks()), + bufReader: bufio.NewReaderSize(nil, 128), + } + meminfo, err := sysinfo.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + s.machineMemory = uint64(meminfo.MemTotal) + } + + go s.run() + return s +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { + m sync.Mutex + supervisor statsSupervisor + interval time.Duration + clockTicksPerSecond uint64 + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + machineMemory uint64 +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +func (s *statsCollector) run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. + // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + if err != nil { + if _, ok := err.(errNotRunning); !ok { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + } + continue + } + // FIXME: move to containerd + stats.CPUStats.SystemUsage = systemUsage + + pair.publisher.Publish(*stats) + } + } +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. 
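+// Consumers typically turn two consecutive samples into a usage percentage; a
+// sketch, assuming current and previous types.CPUStats values with non-zero deltas:
+//
+//	cpuDelta := float64(cur.CPUUsage.TotalUsage - pre.CPUUsage.TotalUsage)
+//	sysDelta := float64(cur.SystemUsage - pre.SystemUsage)
+//	percent := cpuDelta / sysDelta * float64(len(cur.CPUUsage.PercpuUsage)) * 100.0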
+func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer func() { + s.bufReader.Reset(nil) + f.Close() + }() + s.bufReader.Reset(f) + err = nil + for err == nil { + line, err = s.bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + s.clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") +} diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go new file mode 100644 index 00000000..70174300 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stop.go @@ -0,0 +1,65 @@ +package daemon + +import ( + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/errors" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. +func (daemon *Daemon) ContainerStop(name string, seconds int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if err := daemon.containerStop(container, seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + logrus.Infof("Failed to send signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. 
If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + container.WaitStop(-1 * time.Second) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/top_unix.go b/vendor/github.com/docker/docker/daemon/top_unix.go new file mode 100644 index 00000000..5afa6024 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_unix.go @@ -0,0 +1,85 @@ +//+build !windows + +package daemon + +import ( + "fmt" + "github.com/docker/containerd/subreaper/exec" + "strconv" + "strings" + + "github.com/docker/engine-api/types" +) + +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + if psArgs == "" { + psArgs = "-ef" + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + pids, err := daemon.containerd.GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %v", err) + } + + procList := &types.ContainerProcessList{} + + lines := strings.Split(string(output), "\n") + procList.Titles = strings.Fields(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + // loop through the output and extract the PID from each line + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) + } + } + } + daemon.LogContainerEvent(container, "top") + return procList, nil +} diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go new file mode 100644 index 00000000..c1ab74b0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/unpause.go @@ -0,0 +1,43 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerUnpause unpauses a container +func (daemon *Daemon) ContainerUnpause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerUnpause(container); err != nil { + return err + } + + return nil +} + +// containerUnpause resumes the container execution after the container is paused. 
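+// It holds the container lock while checking state and asking containerd
+// to resume, so concurrent pause/unpause requests are serialized.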
+func (daemon *Daemon) containerUnpause(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot unpause a container that is not running
+	if !container.Running {
+		return errNotRunning{container.ID}
+	}
+
+	// We cannot unpause a container that is not paused
+	if !container.Paused {
+		return fmt.Errorf("Container %s is not paused", container.ID)
+	}
+
+	if err := daemon.containerd.Resume(container.ID); err != nil {
+		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/update.go b/vendor/github.com/docker/docker/daemon/update.go
new file mode 100644
index 00000000..fee470a3
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/update.go
@@ -0,0 +1,100 @@
+package daemon
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/engine-api/types/container"
+)
+
+// ContainerUpdate updates the configuration of the container
+func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) {
+	var warnings []string
+
+	warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
+	if err != nil {
+		return warnings, err
+	}
+
+	if err := daemon.update(name, hostConfig); err != nil {
+		return warnings, err
+	}
+
+	return warnings, nil
+}
+
+// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID.
+func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error {
+	if len(cmd) == 0 {
+		return nil
+	}
+	c, err := daemon.GetContainer(cID)
+	if err != nil {
+		return err
+	}
+	c.Path = cmd[0]
+	c.Args = cmd[1:]
+	return nil
+}
+
+func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
+	if hostConfig == nil {
+		return nil
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	restoreConfig := false
+	backupHostConfig := *container.HostConfig
+	defer func() {
+		if restoreConfig {
+			container.Lock()
+			container.HostConfig = &backupHostConfig
+			container.ToDisk()
+			container.Unlock()
+		}
+	}()
+
+	if container.RemovalInProgress || container.Dead {
+		return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be updated."))
+	}
+
+	if container.IsRunning() && hostConfig.KernelMemory != 0 {
+		return errCannotUpdate(container.ID, fmt.Errorf("Cannot update kernel memory of a running container, please stop it first."))
+	}
+
+	if err := container.UpdateContainer(hostConfig); err != nil {
+		restoreConfig = true
+		return errCannotUpdate(container.ID, err)
+	}
+
+	// if the Restart Policy changed, we need to update the container monitor
+	container.UpdateMonitor(hostConfig.RestartPolicy)
+
+	// if the container is restarting, wait up to 5 seconds until it's running
+	if container.IsRestarting() {
+		container.WaitRunning(5 * time.Second)
+	}
+
+	// If the container is not running, updating the hostConfig struct is
+	// enough; resources will be applied when the container is started
+	// again. If the container is running (including paused), we need to
+	// push the updated configuration down to the real world.
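+	// A failure below sets restoreConfig, which makes the deferred
+	// function above roll back to the saved HostConfig and persist it
+	// with ToDisk.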
+ if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/vendor/github.com/docker/docker/daemon/update_linux.go b/vendor/github.com/docker/docker/daemon/update_linux.go new file mode 100644 index 00000000..97ba7c09 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_linux.go @@ -0,0 +1,25 @@ +// +build linux + +package daemon + +import ( + "github.com/docker/docker/libcontainerd" + "github.com/docker/engine-api/types/container" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint32(resources.BlkioWeight) + r.CpuShares = uint32(resources.CPUShares) + r.CpuPeriod = uint32(resources.CPUPeriod) + r.CpuQuota = uint32(resources.CPUQuota) + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint32(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint32(resources.MemorySwap) + } + r.MemoryReservation = uint32(resources.MemoryReservation) + r.KernelMemoryLimit = uint32(resources.KernelMemory) + return r +} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go new file mode 100644 index 00000000..37f4e7fa --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes.go @@ -0,0 +1,178 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) + +type mounts []container.Mount + +// volumeToAPIType converts a volume.Volume to the type used by the remote API +func volumeToAPIType(v volume.Volume) *types.Volume { + tv := &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + Mountpoint: v.Path(), + } + if v, ok := v.(interface { + Labels() map[string]string + }); ok { + tv.Labels = v.Labels() + } + return tv +} + +// Len returns the number of mounts. Used in sorting. +func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m mounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. 
Select the previously configured mount points for the containers, if any.
+// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+// 4. Cleanup old volumes that are about to be reassigned.
+func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	binds := map[string]bool{}
+	mountPoints := map[string]*volume.MountPoint{}
+
+	// 1. Read already configured mount points.
+	for name, point := range container.MountPoints {
+		mountPoints[name] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := volume.ParseVolumesFrom(v)
+		if err != nil {
+			return err
+		}
+
+		c, err := daemon.GetContainer(containerID)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range c.MountPoints {
+			cp := &volume.MountPoint{
+				Name:        m.Name,
+				Source:      m.Source,
+				RW:          m.RW && volume.ReadWrite(mode),
+				Driver:      m.Driver,
+				Destination: m.Destination,
+				Propagation: m.Propagation,
+				Named:       m.Named,
+			}
+
+			if len(cp.Source) == 0 {
+				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+
+			mountPoints[cp.Destination] = cp
+		}
+	}
+
+	// 3. Read bind mounts
+	for _, b := range hostConfig.Binds {
+		// #10618
+		bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver)
+		if err != nil {
+			return err
+		}
+
+		if binds[bind.Destination] {
+			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
+		}
+
+		if len(bind.Name) > 0 {
+			// create the volume
+			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
+			if err != nil {
+				return err
+			}
+			bind.Volume = v
+			bind.Source = v.Path()
+			// bind.Name is an already existing volume, we need to use that here
+			bind.Driver = v.DriverName()
+			bind.Named = true
+			if bind.Driver == "local" {
+				bind = setBindModeIfNull(bind)
+			}
+		}
+
+		if label.RelabelNeeded(bind.Mode) {
+			if err := label.Relabel(bind.Source, container.MountLabel, label.IsShared(bind.Mode)); err != nil {
+				return err
+			}
+		}
+		binds[bind.Destination] = true
+		mountPoints[bind.Destination] = bind
+	}
+
+	container.Lock()
+
+	// 4. Cleanup old volumes that are about to be reassigned.
+	for _, m := range mountPoints {
+		if m.BackwardsCompatible() {
+			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
+				daemon.volumes.Dereference(mp.Volume, container.ID)
+			}
+		}
+	}
+	container.MountPoints = mountPoints
+
+	container.Unlock()
+
+	return nil
+}
+
+// lazyInitializeVolume initializes a mountpoint's volume if needed.
+// This happens after a daemon restart.
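+// For example (illustrative), a mount point restored from the container's
+// on-disk state knows its volume name and driver but has a nil Volume;
+// the first use re-resolves it through the volume store, which also
+// re-registers the container as a reference holder.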
+func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
+	if len(m.Driver) > 0 && m.Volume == nil {
+		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
+		if err != nil {
+			return err
+		}
+		m.Volume = v
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go
new file mode 100644
index 00000000..078fd10b
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/volumes_unix.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"sort"
+	"strconv"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/volume"
+)
+
+// setupMounts iterates through each of the mount points for a container and
+// calls Setup() on each. It also looks to see if it is a network mount such as
+// /etc/resolv.conf, and if it is not, appends it to the array of mounts.
+func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
+	var mounts []container.Mount
+	for _, m := range c.MountPoints {
+		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
+			return nil, err
+		}
+		path, err := m.Setup()
+		if err != nil {
+			return nil, err
+		}
+		if !c.TrySetNetworkMount(m.Destination, path) {
+			mnt := container.Mount{
+				Source:      path,
+				Destination: m.Destination,
+				Writable:    m.RW,
+				Propagation: m.Propagation,
+			}
+			if m.Volume != nil {
+				attributes := map[string]string{
+					"driver":      m.Volume.DriverName(),
+					"container":   c.ID,
+					"destination": m.Destination,
+					"read/write":  strconv.FormatBool(m.RW),
+					"propagation": m.Propagation,
+				}
+				daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes)
+			}
+			mounts = append(mounts, mnt)
+		}
+	}
+
+	mounts = sortMounts(mounts)
+	netMounts := c.NetworkMounts()
+	// if we are going to mount any of the network files from container
+	// metadata, the ownership must be set properly for potential container
+	// remapped root (user namespaces)
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	for _, mount := range netMounts {
+		if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
+			return nil, err
+		}
+	}
+	return append(mounts, netMounts...), nil
+}
+
+// sortMounts sorts an array of mounts by the number of path components in
+// their destinations, so that shallower mounts come first. This ensures that
+// when mounting, the mounts don't shadow other mounts. For example, if mounting
+// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first.
+func sortMounts(m []container.Mount) []container.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
+
+// setBindModeIfNull is platform specific processing to ensure the
+// shared mode is set to 'z' if it is null. This is called in the case
+// of processing a named volume and not a typical bind.
+func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint {
+	if bind.Mode == "" {
+		bind.Mode = "z"
+	}
+	return bind
+}
diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go
new file mode 100644
index 00000000..52b335cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/wait.go
@@ -0,0 +1,17 @@
+package daemon
+
+import "time"
+
+// ContainerWait stops processing until the given container is
+// stopped. If the container is not found, an error is returned. On a
+// successful stop, the exit code of the container is returned. On a
+// timeout, an error is returned. If you want to wait forever, supply
+// a negative duration for the timeout.
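+//
+// An illustrative call:
+//
+//	exitCode, err := daemon.ContainerWait("db", -1*time.Second) // block until "db" exits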
+func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return -1, err + } + + return container.WaitStop(timeout) +} diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go new file mode 100644 index 00000000..f7a9c621 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/errors.go @@ -0,0 +1,113 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.err.Error() +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. 
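+// Roughly: unauthorized, unsupported and denied registry errors, missing
+// credentials, unexpected HTTP responses, and a full disk (ENOSPC) are
+// wrapped as non-retryable; anything else is returned unchanged and may
+// be retried.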
+func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return err +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go new file mode 100644 index 00000000..9f744d46 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -0,0 +1,77 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + tempFilePath := path + ".tmp" + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(tempFilePath, value, 0644); err != nil { + return err + } + return os.Rename(tempFilePath, path) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. 
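+// On disk the store is a plain file tree, <basePath>/<namespace>/<key>,
+// so deletion is a single file removal.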
+func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 00000000..f6e45892 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,44 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 00000000..239cd1f4 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,137 @@ +package metadata + +import ( + "encoding/json" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. +type V2MetadataService struct { + store Store +} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) *V2MetadataService { + return &V2MetadataService{ + store: store, + } +} + +func (serv *V2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *V2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *V2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. 
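+// Keys take the form <algorithm>/<hex> (see diffIDKey and digestKey
+// above), e.g. an illustrative "sha256/ab12…".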
+func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *V2MetadataService) Remove(metadata V2Metadata) error { + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go new file mode 100644 index 00000000..4b42371b --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,205 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. 
+ ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService *registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore image.Store + // ReferenceStore manages tags. + ReferenceStore reference.Store + // DownloadManager manages concurrent pulls. + DownloadManager *xfer.LayerDownloadManager +} + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not empty or `scratch` + if err := validateRepoName(repoInfo.Name()); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. 
+ confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // append subsequent errors + lastErr = err + } + logrus.Errorf("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return err + } + + imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return lastErr +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// validateRepoName validates the name of a repository. 
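+// Illustrative rejections: an empty string, and the reserved
+// api.NoBaseImageSpecifier base-image name.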
+func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go new file mode 100644 index 00000000..86fad2ef --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -0,0 +1,362 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) + + repoData, err := p.session.GetRepositoryData(p.repoInfo) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + } + // Unexpected HTTP error + return err + } + + logrus.Debugf("Retrieving the tag list") + var tagsList map[string]string + tagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
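+				// Record only the most recent endpoint failure; it is
+				// surfaced only if every endpoint ultimately fails.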
+ lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. + for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Create(config) + if err != nil { + return err + } + + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld 
*v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 00000000..748ef422 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,840 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +var errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. 
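+// The wrapped error is prefixed, e.g. an illustrative
+// "error pulling image configuration: unauthorized".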
+func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService *metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + logrus.Errorf("Error trying v2 registry: %v", err) + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService *metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + blobs := ld.repo.Blobs(ctx) + + layerDownload, err := blobs.Open(ctx, ld.digest) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + if err == distribution.ErrBlobUnknown { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debugf("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
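+		// (offset is 0 for a fresh download and the resume point
+		// after an interrupted one)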
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier, err = digest.NewDigestVerifier(ld.digest) + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + // NOTE: not using TagService.Get, since it uses HEAD requests + // against the manifests endpoint, which are not supported by + // all registry versions. 
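+		// An empty digest combined with the WithTag option makes the
+		// distribution client fetch the manifest by tag using a GET.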
+ manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) + + var ( + imageID image.ID + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + oldTagImageID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagImageID == imageID { + return false, nil + } + } else if err != reference.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil { + return false, err + } + } else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil { + return false, err + } + + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { + return "", "", err + } + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
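Each schema1 history entry carries a raw V1Compatibility JSON blob, and entries flagged "throwaway" describe empty layers that contribute history but no filesystem data; that is why the loop below records the history entry and then skips the layer download. An abbreviated, hand-written example of such an entry:

    // IDs shortened for readability; real v1 IDs are 64 hex characters.
    const v1Compat = `{"id":"3b38edc92eb7","parent":"e27dd9d1cd0a","throwaway":true}`

    var throwAway struct {
        ThrowAway bool `json:"throwaway,omitempty"`
    }
    _ = json.Unmarshal([]byte(v1Compat), &throwAway) // throwAway.ThrowAway == true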
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err = p.config.ImageStore.Create(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + imageID = image.ID(target.Digest) + if _, err := p.config.ImageStore.Get(imageID); err == nil { + // If the image already exists locally, no need to pull + // anything. + return imageID, manifestDigest, nil + } + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.References() { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + var ( + configJSON []byte // raw serialized image config + unmarshalledConfig image.Image // deserialized image config + downloadRootFS image.RootFS // rootFS to use for registering layers. 
+ ) + if runtime.GOOS == "windows" { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return "", "", err + } + if unmarshalledConfig.RootFS == nil { + return "", "", errors.New("image config has no rootfs section") + } + downloadRootFS = *unmarshalledConfig.RootFS + downloadRootFS.DiffIDs = []layer.DiffID{} + } else { + downloadRootFS = *image.NewRootFS() + } + + rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + if err != nil { + if configJSON != nil { + // Already received the config + return "", "", err + } + select { + case err = <-errChan: + return "", "", err + default: + cancel() + select { + case <-configChan: + case <-errChan: + } + return "", "", err + } + } + defer release() + + if configJSON == nil { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return "", "", err + } + } + + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range rootFS.DiffIDs { + if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + + imageID, err = p.config.ImageStore.Create(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) { + select { + case configJSON := <-configChan: + var unmarshalledConfig image.Image + if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil { + return nil, image.Image{}, err + } + return configJSON, unmarshalledConfig, nil + case err := <-errChan: + return nil, image.Image{}, err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here. 
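A manifest list is a small JSON index whose entries pair a manifest digest with a platform descriptor; the loop below simply takes the first entry whose platform matches the local runtime.GOARCH and runtime.GOOS. One entry looks roughly like this (hand-written, digest abbreviated):

    const manifestListEntry = `{
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:…",
      "platform": {"architecture": "amd64", "os": "linux"}
    }`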
+ if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + return "", "", errors.New("no supported platform found in manifest list") + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(ref, manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + imageID, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + imageID, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return imageID, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return "", err + } + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. 
If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check for duplicate IDs anywhere in the chain, so we return an error instead of deadlocking + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other; we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go new file mode 100644 index 00000000..9fbb875e --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go new file mode 100644 index 00000000..52ee8e77 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push.go @@ -0,0 +1,219 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + // MetaHeaders store HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the push + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService *registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // LayerStore manages layers. + LayerStore layer.Store + // ImageStore manages images. + ImageStore image.Store + // ReferenceStore manages tags. + ReferenceStore reference.Store + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. 
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on the repository named by ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow interrupting the current push when a new push of the same image is started. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + if len(associations) == 0 { + return fmt.Errorf("Repository does not exist: %s", repoInfo.Name()) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. 
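The select with an empty default used below is Go's standard non-blocking probe for context cancellation: it falls through immediately while the context is still live. Factored out as a hypothetical predicate:

    // cancelled reports whether ctx is already done, without blocking.
    func cancelled(ctx context.Context) bool {
        select {
        case <-ctx.Done():
            return true
        default:
            return false
        }
    }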
+ select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Errorf("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go new file mode 100644 index 00000000..b6e4a130 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v1.go @@ -0,0 +1,454 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + 
) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. +type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := digest.Digest(imageID).Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { + v1ID := digest.Digest(l.ChainID()).Hex() + + config := "" + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + }, nil +} + +// Retrieve all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + imgID, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[image.ID]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references. 
+ continue + } + + tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag()) + + if _, present := imagesSeen[association.ImageID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ImageID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) { + img, err := p.config.ImageStore.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + var l layer.Layer + if topLayerID == "" { + l = layer.EmptyLayer + } else { + l, err = p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, l) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + } + + dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) + if err != nil { + return nil, err + } + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { + if l == nil { + return nil, nil, nil + } + + imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage, nil + } + } + + dependencyImage, err := newV1DependencyImage(l, parent) + if err != nil { + return nil, nil, err + } + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage, nil +} + +// createImageIndex returns an index of an image's layer IDs and tags. +func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. 
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + truncID := stringid.TruncateID(image.Layer().DiffID().String()) + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + progress.Update(p.config.ProgressOutput, truncID, "Waiting") + } else { + progress.Update(p.config.ProgressOutput, truncID, "Already exists") + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. + if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a goroutine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order in which the images are pushed + // is very important; that is why we are still iterating over the ordered list of imageIDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(ctx, img, endpoint); err != nil { + // FIXME: Continue on error? + return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. +func (p *v1Pusher) pushRepository(ctx context.Context) error { + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + p.config.LayerStore.Release(l) + } + }() + if err != nil { + return err + } + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) + if err != nil { + return err + } + // push the repository to each of the endpoints only if it does not exist. 
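pushImageToEndpoint above is a classic bounded fan-out: at most five workers drain a channel of images to check, while a separate goroutine collects the IDs that still need pushing. The shape of that pattern, reduced to a self-contained sketch (names hypothetical):

    // fanOut runs fn over items using at most `workers` goroutines.
    func fanOut(items []string, workers int, fn func(string)) {
        var wg sync.WaitGroup
        ch := make(chan string)
        for i := 0; i < workers; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for item := range ch {
                    fn(item)
                }
            }()
        }
        for _, item := range items {
            ch <- item
        }
        close(ch) // signal no more work so the range loops can finish
        wg.Wait()
    }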
+ for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go new file mode 100644 index 00000000..e86badb4 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -0,0 +1,438 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. 
+type PushResult struct { + Tag string + Digest digest.Digest + Size int +} + +type v2Pusher struct { + v2MetadataService *metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pushV2Repository(ctx); err != nil { + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.pushState.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", p.ref.String()) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return errors.New("cannot push a digest reference") + } + + // Push all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error { + logrus.Debugf("Pushing repository: %s", ref.String()) + + img, err := p.config.ImageStore.Get(imageID) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) + } + + var l layer.Layer + + topLayerID := img.RootFS.ChainID() + if topLayerID == "" { + l = layer.EmptyLayer + } else { + l, err = p.config.LayerStore.Get(topLayerID) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer layer.ReleaseAndLog(p.config.LayerStore, l) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + repoInfo: p.repoInfo, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows. 
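The loop below runs once per DiffID and steps down the chain with l.Parent() as it goes, so descriptor i always refers to the i-th layer from the top; on Windows the DiffID count excludes the base layer, which is what keeps it from being pushed. Schematically, assuming a top layer l as above:

    // One iteration per DiffID, walking from the top layer downwards:
    for i := 0; i < len(diffIDs); i++ {
        upload(l)      // enqueue the current layer
        l = l.Parent() // nil once the bottom of the chain is passed
    }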
+ for i := 0; i < len(img.RootFS.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON()) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON()) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. + for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer layer.Layer + v2MetadataService *metadata.V2MetadataService + repoInfo reference.Named + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.repo.Named().Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + // Do we have any metadata associated with this layer's DiffID? 
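The metadata lookup below is what enables cross-repository blob mounting: if this digest was previously pushed to another repository on the same registry, the client asks the registry to link the existing blob instead of re-uploading it. On the wire that is a single POST (Docker Registry HTTP API V2); roughly:

    // Sketch of the request issued via bs.Create with client.WithMountFrom
    // below; a 201 Created response means the blob was mounted, no upload.
    url := fmt.Sprintf("%s/v2/%s/blobs/uploads/?mount=%s&from=%s",
        registryBase, targetRepo, dgst, sourceRepo)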
+ v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState) + if err != nil { + progress.Update(progressOutput, pd.ID(), "Image push failed") + return distribution.Descriptor{}, retryOnError(err) + } + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = descriptor + pd.pushState.Unlock() + return descriptor, nil + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + mountAttemptsRemaining := 3 + + // Attempt to find another repository in the same registry to mount the layer + // from to avoid an unnecessary upload. + // Note: metadata is stored from oldest to newest, so we iterate through this + // slice in reverse to maximize our chances of the blob still existing in the + // remote repository. + for i := len(v2Metadata) - 1; i >= 0 && mountAttemptsRemaining > 0; i-- { + mountFrom := v2Metadata[i] + + sourceRepo, err := reference.ParseNamed(mountFrom.SourceRepository) + if err != nil { + continue + } + if pd.repoInfo.Hostname() != sourceRepo.Hostname() { + // don't mount blobs from another registry + continue + } + + namedRef, err := reference.WithName(mountFrom.SourceRepository) + if err != nil { + continue + } + + // TODO (brianbland): We need to construct a reference where the Name is + // only the full remote name, so clean this up when distribution has a + // richer reference package + remoteRef, err := distreference.WithName(namedRef.RemoteName()) + if err != nil { + continue + } + + canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest) + if err != nil { + continue + } + + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountFrom.Digest, sourceRepo.FullName()) + + layerUpload, err = bs.Create(ctx, client.WithMountFrom(canonicalRef)) + switch err := err.(type) { + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + case nil: + // blob upload session created successfully, so begin the upload + mountAttemptsRemaining = 0 + default: + // unable to mount layer from this repository, so this source mapping is no longer valid + logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository) + pd.v2MetadataService.Remove(mountFrom) + mountAttemptsRemaining-- + } + } + + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + arch, err := pd.layer.TarStream() + if err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + // don't care if this fails; best effort + size, _ := pd.layer.DiffSize() + + reader := 
progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing") + compressedReader, compressionDone := compress(reader) + defer func() { + reader.Close() + <-compressionDone + }() + + digester := digest.Canonical.New() + tee := io.TeeReader(compressedReader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + compressedReader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + pd.pushState.Lock() + + // If Commit succeeded, that's an indication that the remote registry + // speaks the v2 protocol. + pd.pushState.confirmedV2 = true + + descriptor := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + pd.pushState.remoteLayers[diffID] = descriptor + + pd.pushState.Unlock() + + return descriptor, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +// layerAlreadyExists checks if the registry already knows about any of the +// metadata passed in the "metadata" slice. If it finds one that the registry +// knows about, it returns the known digest and "true". 
+func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) { + for _, meta := range metadata { + // Only check blobsums that are known to this repository or have an unknown source + if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() { + continue + } + descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest) + switch err { + case nil: + descriptor.MediaType = schema2.MediaTypeLayer + return descriptor, true, nil + case distribution.ErrBlobUnknown: + // nop + default: + return distribution.Descriptor{}, false, err + } + } + return distribution.Descriptor{}, false, nil +} diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go new file mode 100644 index 00000000..f768a14d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,130 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/docker/distribution" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +type dumbCredentialStore struct { + auth *types.AuthConfig +} + +func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { + return dcs.auth.Username, dcs.auth.Password +} + +func (dcs dumbCredentialStore) RefreshToken(*url.URL, string) string { + return dcs.auth.IdentityToken +} + +func (dcs dumbCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.FullName() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName() + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) 
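Authentication is wired up in two phases: PingV2Registry (next) probes the /v2/ endpoint to collect auth challenges, and the token handler configured below then requests a bearer token scoped to this repository and the requested actions. The negotiated scope is a plain string; building it by hand would look like:

    // e.g. "repository:rancher/os:push,pull" for a push to this repo
    scope := fmt.Sprintf("repository:%s:%s", repoName, strings.Join(actions, ","))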
+ + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + creds := dumbCredentialStore{auth: authConfig} + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{ + auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + }, + }, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := distreference.ParseNamed(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 00000000..739c427c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,430 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { + return &LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + } +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. 
+ Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. + Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to be present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStore.Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once. + var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. 
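Deduplication above happens at two granularities: descriptor.Key identifies one blob within this pull (downloadsByKey), while transferKey, the running concatenation of all keys so far, identifies the whole chain, so a concurrent pull sharing the same layer prefix attaches to the in-flight Transfer instead of starting a new one. In miniature:

    transferKey := ""
    for _, desc := range layers {
        transferKey += desc.Key() // e.g. "v2:sha256:…" appended per layer
    }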
+ progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. 
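The retry loop above uses a linear backoff of retries*5 seconds, counted down one second at a time so the status line stays current, and abandoned as soon as the transfer's context is cancelled. A standalone sketch of that countdown, using only the standard library:

```
package main

import (
	"context"
	"fmt"
	"time"
)

// waitWithCountdown blocks for retries*5 seconds, printing a countdown,
// and returns early with the context's error on cancellation.
func waitWithCountdown(ctx context.Context, retries int) error {
	delay := retries * 5
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for delay > 0 {
		fmt.Printf("Retrying in %d second%s\n", delay, map[bool]string{true: "s"}[delay != 1])
		select {
		case <-ticker.C:
			delay--
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}
```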
+ select {
+ case <-d.Transfer.Context().Done():
+ d.err = errors.New("layer registration cancelled")
+ return
+ case <-sourceDownload.Done():
+ }
+
+ l, err = sourceDownload.result()
+ if err != nil {
+ d.err = err
+ return
+ }
+
+ layerReader, err := l.TarStream()
+ if err != nil {
+ d.err = err
+ return
+ }
+ defer layerReader.Close()
+
+ d.layer, err = d.layerStore.Register(layerReader, parentLayer)
+ if err != nil {
+ d.err = fmt.Errorf("failed to register layer: %v", err)
+ return
+ }
+
+ withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
+ if hasRegistered {
+ withRegistered.Registered(d.layer.DiffID())
+ }
+
+ // Doesn't actually need to be its own goroutine, but
+ // done like this so we can defer close(c).
+ go func() {
+ <-d.Transfer.Released()
+ if d.layer != nil {
+ layer.ReleaseAndLog(d.layerStore, d.layer)
+ }
+ }()
+ }()
+
+ return d
+ }
+}
diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go
new file mode 100644
index 00000000..dd83f8b8
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go
@@ -0,0 +1,392 @@
+package xfer
+
+import (
+ "runtime"
+ "sync"
+
+ "github.com/docker/docker/pkg/progress"
+ "golang.org/x/net/context"
+)
+
+// DoNotRetry is an error wrapper indicating that the error cannot be resolved
+// with a retry.
+type DoNotRetry struct {
+ Err error
+}
+
+// Error returns the stringified representation of the encapsulated error.
+func (e DoNotRetry) Error() string {
+ return e.Err.Error()
+}
+
+// Watcher is returned by Watch and can be passed to Release to stop watching.
+type Watcher struct {
+ // signalChan is used to signal to the watcher goroutine that
+ // new progress information is available, or that the transfer
+ // has finished.
+ signalChan chan struct{}
+ // releaseChan signals to the watcher goroutine that the watcher
+ // should be detached.
+ releaseChan chan struct{}
+ // running remains open as long as the watcher is watching the
+ // transfer. It gets closed if the transfer finishes or the
+ // watcher is detached.
+ running chan struct{}
+}
+
+// Transfer represents an in-progress transfer.
+type Transfer interface {
+ Watch(progressOutput progress.Output) *Watcher
+ Release(*Watcher)
+ Context() context.Context
+ Close()
+ Done() <-chan struct{}
+ Released() <-chan struct{}
+ Broadcast(masterProgressChan <-chan progress.Progress)
+}
+
+type transfer struct {
+ mu sync.Mutex
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ // watchers keeps track of the goroutines monitoring progress output,
+ // indexed by the channels that release them.
+ watchers map[chan struct{}]*Watcher
+
+ // lastProgress is the most recently received progress event.
+ lastProgress progress.Progress
+ // hasLastProgress is true when lastProgress has been set.
+ hasLastProgress bool
+
+ // running remains open as long as the transfer is in progress.
+ running chan struct{}
+ // released stays open until all watchers release the transfer and
+ // the transfer is no longer tracked by the transfer manager.
+ released chan struct{}
+
+ // broadcastDone is true if the master progress channel has closed.
+ broadcastDone bool
+ // closed is true if Close has been called
+ closed bool
+ // broadcastSyncChan allows watchers to "ping" the broadcasting
+ // goroutine to wait for it to deplete its input channel. This ensures
+ // a detaching watcher won't miss an event that was sent before it
+ // started detaching.
+ broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. + if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. 
+ select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. +func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. 
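For orientation, the smallest useful DoFunc looks roughly like the sketch below: a hypothetical timed job that waits for its concurrency slot and reports once when done. It assumes placement in this xfer package (plus a "time" import), and since it never closes inactive it holds its slot until it finishes.

```
func makeSleepFunc(d time.Duration) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		t := NewTransfer()
		go func() {
			defer close(progressChan) // lets Broadcast finish and close Done()
			<-start                   // wait for a concurrency slot
			select {
			case <-time.After(d):
				progressChan <- progress.Progress{ID: "sleep", Action: "Done"}
			case <-t.Context().Done():
				// cancelled because every watcher released the transfer
			}
		}()
		return t
	}
}
```

A caller would pair it with the manager as `xfer, watcher := tm.Transfer(key, makeSleepFunc(time.Second), out)`, then `defer xfer.Release(watcher)` and wait on `xfer.Done()`.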
+ xfer.Release(watcher)
+ tm.mu.Unlock()
+ // The goroutine that removes this transfer from the
+ // map is also waiting for xfer.Done(), so yield to it.
+ // This could be avoided by adding a Closed method
+ // to Transfer to allow explicitly waiting for it to be
+ // removed from the map, but forcing a scheduling round in
+ // this very rare case seems better than bloating the
+ // interface definition.
+ runtime.Gosched()
+ <-xfer.Done()
+ tm.mu.Lock()
+ default:
+ return xfer, watcher
+ }
+ }
+
+ start := make(chan struct{})
+ inactive := make(chan struct{})
+
+ if tm.activeTransfers < tm.concurrencyLimit {
+ close(start)
+ tm.activeTransfers++
+ } else {
+ tm.waitingTransfers = append(tm.waitingTransfers, start)
+ }
+
+ masterProgressChan := make(chan progress.Progress)
+ xfer := xferFunc(masterProgressChan, start, inactive)
+ watcher := xfer.Watch(progressOutput)
+ go xfer.Broadcast(masterProgressChan)
+ tm.transfers[key] = xfer
+
+ // When the transfer is finished, remove from the map.
+ go func() {
+ for {
+ select {
+ case <-inactive:
+ tm.mu.Lock()
+ tm.inactivate(start)
+ tm.mu.Unlock()
+ inactive = nil
+ case <-xfer.Done():
+ tm.mu.Lock()
+ if inactive != nil {
+ tm.inactivate(start)
+ }
+ delete(tm.transfers, key)
+ tm.mu.Unlock()
+ xfer.Close()
+ return
+ }
+ }
+ }()
+
+ return xfer, watcher
+}
+
+func (tm *transferManager) inactivate(start chan struct{}) {
+ // If the transfer was started, remove it from the activeTransfers
+ // count.
+ select {
+ case <-start:
+ // Start next transfer if any are waiting
+ if len(tm.waitingTransfers) != 0 {
+ close(tm.waitingTransfers[0])
+ tm.waitingTransfers = tm.waitingTransfers[1:]
+ } else {
+ tm.activeTransfers--
+ }
+ default:
+ }
+}
diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go
new file mode 100644
index 00000000..563824c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/xfer/upload.go
@@ -0,0 +1,163 @@
+package xfer
+
+import (
+ "errors"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution"
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/progress"
+ "golang.org/x/net/context"
+)
+
+const maxUploadAttempts = 5
+
+// LayerUploadManager provides task management and progress reporting for
+// uploads.
+type LayerUploadManager struct {
+ tm TransferManager
+}
+
+// NewLayerUploadManager returns a new LayerUploadManager.
+func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager {
+ return &LayerUploadManager{
+ tm: NewTransferManager(concurrencyLimit),
+ }
+}
+
+type uploadTransfer struct {
+ Transfer
+
+ remoteDescriptor distribution.Descriptor
+ err error
+}
+
+// An UploadDescriptor references a layer that may need to be uploaded.
+type UploadDescriptor interface {
+ // Key returns the key used to deduplicate uploads.
+ Key() string
+ // ID returns the ID for display purposes.
+ ID() string
+ // DiffID should return the DiffID for this layer.
+ DiffID() layer.DiffID
+ // Upload is called to perform the Upload.
+ Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
+ // SetRemoteDescriptor provides the distribution.Descriptor that was
+ // returned by Upload. This descriptor is not to be confused with
+ // the UploadDescriptor interface, which is used for internally
+ // identifying layers that are being uploaded.
+ SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. +func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/docker/docker/docker/README.md b/vendor/github.com/docker/docker/docker/README.md new file mode 100644 index 00000000..015bc133 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker's main function. + +This file provides first line CLI argument parsing and environment variable setting. 
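Driving the upload manager from the caller's side is then mostly plumbing: hand it descriptors and a progress sink. A hedged sketch, where ctx and descs (a slice of concrete UploadDescriptor implementations) are assumed to come from the surrounding push code:

```
lum := xfer.NewLayerUploadManager(3) // at most three uploads in flight

progressChan := make(chan progress.Progress, 100)
go func() {
	for p := range progressChan {
		fmt.Printf("%s: %s\n", p.ID, p.Action)
	}
}()

if err := lum.Upload(ctx, descs, progress.ChanOutput(progressChan)); err != nil {
	logrus.Errorf("upload failed: %v", err)
}
```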
diff --git a/vendor/github.com/docker/docker/docker/client.go b/vendor/github.com/docker/docker/docker/client.go new file mode 100644 index 00000000..487d4458 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/client.go @@ -0,0 +1,33 @@ +package docker + +import ( + "path/filepath" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" +) + +var clientFlags = &cli.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} + +func init() { + client := clientFlags.FlagSet + client.StringVar(&clientFlags.ConfigDir, []string{"-config"}, cliconfig.ConfigDir(), "Location of client config files") + + clientFlags.PostParse = func() { + clientFlags.Common.PostParse() + + if clientFlags.ConfigDir != "" { + cliconfig.SetConfigDir(clientFlags.ConfigDir) + } + + if clientFlags.Common.TrustKey == "" { + clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) + } + + if clientFlags.Common.Debug { + utils.EnableDebug() + } + } +} diff --git a/vendor/github.com/docker/docker/docker/common.go b/vendor/github.com/docker/docker/docker/common.go new file mode 100644 index 00000000..1615ef25 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/common.go @@ -0,0 +1,100 @@ +package docker + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTrustKeyFile = "key.json" + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" + tlsVerifyKey = "tlsverify" +) + +var ( + commonFlags = &cli.CommonFlags{FlagSet: new(flag.FlagSet)} + + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } + + commonFlags.PostParse = postParseCommon + + cmd := commonFlags.FlagSet + + cmd.BoolVar(&commonFlags.Debug, []string{"D", "-debug"}, false, "Enable debug mode") + cmd.StringVar(&commonFlags.LogLevel, []string{"l", "-log-level"}, "info", "Set the logging level") + cmd.BoolVar(&commonFlags.TLS, []string{"-tls"}, false, "Use TLS; implied by --tlsverify") + cmd.BoolVar(&commonFlags.TLSVerify, []string{"-tlsverify"}, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") + + var tlsOptions tlsconfig.Options + commonFlags.TLSOptions = &tlsOptions + cmd.StringVar(&tlsOptions.CAFile, []string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust certs signed only by this CA") + cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") + cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") + + cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") +} + +func postParseCommon() { + cmd := commonFlags.FlagSet + + setDaemonLogLevel(commonFlags.LogLevel) + + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, 
so we need to check that here as well + if cmd.IsSet("-"+tlsVerifyKey) || commonFlags.TLSVerify { + commonFlags.TLS = true + } + + if !commonFlags.TLS { + commonFlags.TLSOptions = nil + } else { + tlsOptions := commonFlags.TLSOptions + tlsOptions.InsecureSkipVerify = !commonFlags.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !cmd.IsSet("-tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !cmd.IsSet("-tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +func setDaemonLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/docker/docker/docker/daemon.go b/vendor/github.com/docker/docker/docker/daemon.go new file mode 100644 index 00000000..7a0b53f4 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/daemon.go @@ -0,0 +1,417 @@ +// +build daemon + +package docker + +import ( + "crypto/tls" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + "github.com/docker/docker/api/server/router/container" + "github.com/docker/docker/api/server/router/image" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/docker/listeners" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + daemonUsage = " docker daemon [ --help | ... ]\n" + daemonConfigFileFlag = "-config-file" +) + +var ( + daemonCli cli.Handler = NewDaemonCli() +) + +// DaemonCli represents the daemon CLI. +type DaemonCli struct { + *daemon.Config + flags *flag.FlagSet +} + +func presentInHelp(usage string) string { return usage } +func absentFromHelp(string) string { return "" } + +// NewDaemonCli returns a pre-configured daemon CLI +func NewDaemonCli() *DaemonCli { + daemonFlags := cli.Subcmd("daemon", nil, "Enable daemon mode", true) + + // TODO(tiborvass): remove InstallFlags? 
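One detail worth isolating from postParseCommon above: the default TLS cert paths survive only if the files actually exist or the user set the flags explicitly. The same fallback as a tiny standalone helper (the name is illustrative):

```
// defaultIfPresent keeps path only when a file actually exists there.
func defaultIfPresent(path string) string {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return ""
	}
	return path
}
```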
+ daemonConfig := new(daemon.Config)
+ daemonConfig.LogConfig.Config = make(map[string]string)
+ daemonConfig.ClusterOpts = make(map[string]string)
+
+ if runtime.GOOS != "linux" {
+ daemonConfig.V2Only = true
+ }
+
+ daemonConfig.InstallFlags(daemonFlags, presentInHelp)
+ daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
+ daemonFlags.Require(flag.Exact, 0)
+
+ return &DaemonCli{
+ Config: daemonConfig,
+ flags: daemonFlags,
+ }
+}
+
+func migrateKey() (err error) {
+ // Migrate trust key if exists at ~/.docker/key.json and owned by current user
+ oldPath := filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile)
+ newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
+ if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) {
+ defer func() {
+ // Ensure old path is removed if no error occurred
+ if err == nil {
+ err = os.Remove(oldPath)
+ } else {
+ logrus.Warnf("Key migration failed, key file not removed at %s", oldPath)
+ os.Remove(newPath)
+ }
+ }()
+
+ if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil {
+ return fmt.Errorf("Unable to create daemon configuration directory: %s", err)
+ }
+
+ newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ return fmt.Errorf("error creating key file %q: %s", newPath, err)
+ }
+ defer newFile.Close()
+
+ oldFile, err := os.Open(oldPath)
+ if err != nil {
+ return fmt.Errorf("error opening key file %q: %s", oldPath, err)
+ }
+ defer oldFile.Close()
+
+ if _, err := io.Copy(newFile, oldFile); err != nil {
+ return fmt.Errorf("error copying key: %s", err)
+ }
+
+ logrus.Infof("Migrated key from %s to %s", oldPath, newPath)
+ }
+
+ return nil
+}
+
+func getGlobalFlag() (globalFlag *flag.Flag) {
+ defer func() {
+ if x := recover(); x != nil {
+ switch f := x.(type) {
+ case *flag.Flag:
+ globalFlag = f
+ default:
+ panic(x)
+ }
+ }
+ }()
+ visitor := func(f *flag.Flag) { panic(f) }
+ commonFlags.FlagSet.Visit(visitor)
+ clientFlags.FlagSet.Visit(visitor)
+ return
+}
+
+// CmdDaemon is the daemon command, called with the raw arguments after `docker daemon`.
+func (cli *DaemonCli) CmdDaemon(args ...string) error { + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + if !commonFlags.FlagSet.IsEmpty() || !clientFlags.FlagSet.IsEmpty() { + // deny `docker -D daemon` + illegalFlag := getGlobalFlag() + fmt.Fprintf(os.Stderr, "invalid flag '-%s'.\nSee 'docker daemon --help'.\n", illegalFlag.Names[0]) + os.Exit(1) + } else { + // allow new form `docker daemon -D` + flag.Merge(cli.flags, commonFlags.FlagSet) + } + + configFile := cli.flags.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file") + + cli.flags.ParseFlags(args, true) + commonFlags.PostParse() + + if commonFlags.TrustKey == "" { + commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) + } + cliConfig, err := loadDaemonCliConfig(cli.Config, cli.flags, commonFlags, *configFile) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + cli.Config = cliConfig + + if cli.Config.Debug { + utils.EnableDebug() + } + + if utils.ExperimentalBuild() { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + logrus.Fatalf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + logrus.Fatalf("Failed to set log opts: %v", err) + } + } + + var pfile *pidfile.PIDFile + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + logrus.Fatalf("Error starting daemon: %v", err) + } + pfile = pf + defer func() { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.Config{ + AuthorizationPluginNames: cli.Config.AuthorizationPlugins, + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + } + serverConfig = setPlatformServerConfig(serverConfig, cli.Config) + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + logrus.Fatal(err) + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + api := apiserver.New(serverConfig) + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = opts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + logrus.Fatalf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + logrus.Fatalf("bad format %s, expected PROTO://ADDR", protoAddr) + } + l, err := listeners.Init(protoAddrParts[0], protoAddrParts[1], serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + logrus.Fatal(err) + } + + logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) + api.Accept(protoAddrParts[1], l...) 
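Each entry in cli.Config.Hosts above follows the PROTO://ADDR convention and is split exactly once on "://". The same parsing, pulled out as a sketch (standard library only; the helper name is hypothetical):

```
func parseProtoAddr(s string) (proto, addr string, err error) {
	parts := strings.SplitN(s, "://", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("bad format %s, expected PROTO://ADDR", s)
	}
	return parts[0], parts[1], nil
}
```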
+ } + + if err := migrateKey(); err != nil { + logrus.Fatal(err) + } + cli.TrustKeyPath = commonFlags.TrustKey + + registryService := registry.NewService(cli.Config.ServiceOptions) + + containerdRemote, err := libcontainerd.New(filepath.Join(cli.Config.ExecRoot, "libcontainerd"), cli.getPlatformRemoteOptions()...) + if err != nil { + logrus.Fatal(err) + } + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote) + if err != nil { + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + logrus.Fatalf("Error starting daemon: %v", err) + } + + logrus.Info("Daemon has completed initialization") + + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver": d.GraphDriverName(), + }).Info("Docker daemon") + + initRouter(api, d) + + reload := func(config *daemon.Config) { + if err := d.Reload(config); err != nil { + logrus.Errorf("Error reconfiguring the daemon: %v", err) + return + } + if config.IsValueSet("debug") { + debugEnabled := utils.IsDebugEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + utils.DisableDebug() + api.DisableProfiler() + case config.Debug && !debugEnabled: // enable debug + utils.EnableDebug() + api.EnableProfiler() + } + + } + } + + setupConfigReloadTrap(*configFile, cli.flags, reload) + + // The serve API routine never exits unless an error occurs + // We need to start it as a goroutine and wait on it so + // daemon doesn't exit + serveAPIWait := make(chan error) + go api.Wait(serveAPIWait) + + signal.Trap(func() { + api.Close() + <-serveAPIWait + shutdownDaemon(d, 15) + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + }) + + // after the daemon is done setting up we can notify systemd api + notifySystem() + + // Daemon is fully initialized and handling API traffic + // Wait for serve API to complete + errAPI := <-serveAPIWait + shutdownDaemon(d, 15) + containerdRemote.Cleanup() + if errAPI != nil { + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI) + } + return nil +} + +// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case +// d.Shutdown() is waiting too long to kill container or worst it's +// blocked there +func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) { + ch := make(chan struct{}) + go func() { + d.Shutdown() + close(ch) + }() + select { + case <-ch: + logrus.Debug("Clean shutdown succeeded") + case <-time.After(timeout * time.Second): + logrus.Error("Force shutdown daemon") + } +} + +func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commonConfig *cli.CommonFlags, configFile string) (*daemon.Config, error) { + config.Debug = commonConfig.Debug + config.Hosts = commonConfig.Hosts + config.LogLevel = commonConfig.LogLevel + config.TLS = commonConfig.TLS + config.TLSVerify = commonConfig.TLSVerify + config.CommonTLSOptions = daemon.CommonTLSOptions{} + + if commonConfig.TLSOptions != nil { + config.CommonTLSOptions.CAFile = commonConfig.TLSOptions.CAFile + config.CommonTLSOptions.CertFile = commonConfig.TLSOptions.CertFile + config.CommonTLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile + } + + if configFile != "" { + c, err := daemon.MergeDaemonConfigurations(config, daemonFlags, configFile) + if err != nil { + if daemonFlags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to 
configure the Docker daemon with file %s: %v\n", configFile, err)
+ }
+ }
+ // the merged configuration can be nil if the config file didn't exist.
+ // leave the current configuration as it is when that happens.
+ if c != nil {
+ config = c
+ }
+ }
+
+ // Regardless of whether the user sets it to true or false, if they
+ // specify TLSVerify at all then we need to turn on TLS
+ if config.IsValueSet(tlsVerifyKey) {
+ config.TLS = true
+ }
+
+ // ensure that the log level is the one set after merging configurations
+ setDaemonLogLevel(config.LogLevel)
+
+ return config, nil
+}
+
+func initRouter(s *apiserver.Server, d *daemon.Daemon) {
+ routers := []router.Router{
+ container.NewRouter(d),
+ image.NewRouter(d),
+ systemrouter.NewRouter(d),
+ volume.NewRouter(d),
+ build.NewRouter(dockerfile.NewBuildManager(d)),
+ }
+
+ s.InitRouter(utils.IsDebugEnabled(), routers...)
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_freebsd.go b/vendor/github.com/docker/docker/docker/daemon_freebsd.go
new file mode 100644
index 00000000..013f0e91
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_freebsd.go
@@ -0,0 +1,7 @@
+// +build daemon
+
+package docker
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_linux.go b/vendor/github.com/docker/docker/docker/daemon_linux.go
new file mode 100644
index 00000000..3ff9a49e
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_linux.go
@@ -0,0 +1,13 @@
+// +build daemon
+
+package docker
+
+import (
+ systemdDaemon "github.com/coreos/go-systemd/daemon"
+)
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+ // Tell the init daemon we are accepting requests
+ go systemdDaemon.SdNotify("READY=1")
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_none.go b/vendor/github.com/docker/docker/docker/daemon_none.go
new file mode 100644
index 00000000..b2cbfd41
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_none.go
@@ -0,0 +1,13 @@
+// +build !daemon
+
+package docker
+
+import "github.com/docker/docker/cli"
+
+const daemonUsage = ""
+
+var daemonCli cli.Handler
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_unix.go b/vendor/github.com/docker/docker/docker/daemon_unix.go
new file mode 100644
index 00000000..8e212f8e
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_unix.go
@@ -0,0 +1,82 @@
+// +build daemon,!windows
+
+package docker
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ apiserver "github.com/docker/docker/api/server"
+ "github.com/docker/docker/daemon"
+ "github.com/docker/docker/libcontainerd"
+ "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/system"
+)
+
+const defaultDaemonConfigFile = "/etc/docker/daemon.json"
+
+func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
+ serverConfig.EnableCors = daemonCfg.EnableCors
+ serverConfig.CorsHeaders = daemonCfg.CorsHeaders
+
+ return serverConfig
+}
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool {
+ if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil {
+ if int(fileInfo.UID()) == os.Getuid() {
+ return true
+ }
+ }
+ return false
+}
+
+// setDefaultUmask sets the umask to 0022 to avoid problems
+// caused by custom umask
+func setDefaultUmask() error {
+ desiredUmask := 0022
+ syscall.Umask(desiredUmask)
+ if umask := syscall.Umask(desiredUmask); umask != desiredUmask {
+ return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
+ }
+
+ return nil
+}
+
+func getDaemonConfDir() string {
+ return "/etc/docker"
+}
+
+// setupConfigReloadTrap configures the SIGHUP signal to reload the configuration.
+func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, syscall.SIGHUP)
+ go func() {
+ for range c {
+ if err := daemon.ReloadConfiguration(configFile, flags, reload); err != nil {
+ logrus.Error(err)
+ }
+ }
+ }()
+}
+
+func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
+ opts := []libcontainerd.RemoteOption{
+ libcontainerd.WithDebugLog(cli.Config.Debug),
+ }
+ if cli.Config.ContainerdAddr != "" {
+ opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr))
+ } else {
+ opts = append(opts, libcontainerd.WithStartDaemon(true))
+ }
+ if daemon.UsingSystemd(cli.Config) {
+ args := []string{"--systemd-cgroup=true"}
+ opts = append(opts, libcontainerd.WithRuntimeArgs(args))
+ }
+ return opts
+}
diff --git a/vendor/github.com/docker/docker/docker/docker.go b/vendor/github.com/docker/docker/docker/docker.go
new file mode 100644
index 00000000..cbb1e4cf
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/docker.go
@@ -0,0 +1,77 @@
+package docker
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/api/client"
+ "github.com/docker/docker/cli"
+ "github.com/docker/docker/dockerversion"
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/term"
+ "github.com/docker/docker/utils"
+)
+
+func Main() {
+ // Set terminal emulation based on platform as required.
+ stdin, stdout, stderr := term.StdStreams()
+
+ logrus.SetOutput(stderr)
+
+ flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet)
+
+ flag.Usage = func() {
+ fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n")
+ fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")
+
+ flag.CommandLine.SetOutput(stdout)
+ flag.PrintDefaults()
+
+ help := "\nCommands:\n"
+
+ for _, cmd := range dockerCommands {
+ help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description)
+ }
+
+ help += "\nRun 'docker COMMAND --help' for more information on a command."
+ fmt.Fprintf(stdout, "%s\n", help)
+ }
+
+ flag.Parse()
+
+ if *flVersion {
+ showVersion()
+ return
+ }
+
+ if *flHelp {
+ // if global flag --help is present, regardless of what other options and commands there are,
+ // just print the usage.
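Stepping back to setupConfigReloadTrap in daemon_unix.go above, the shape of the pattern is a buffered signal channel plus one long-lived goroutine. A condensed sketch with the docker-specific pieces stripped out (assuming os, os/signal, and syscall imports):

```
func trapReload(reload func()) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	go func() {
		for range c {
			reload() // re-read configuration on every SIGHUP
		}
	}()
}
```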
+ flag.Usage() + return + } + + clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) + + c := cli.New(clientCli, daemonCli) + if err := c.Run(flag.Args()...); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(stderr, sterr.Status) + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(stderr, err) + os.Exit(1) + } +} + +func showVersion() { + if utils.ExperimentalBuild() { + fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) + } else { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) + } +} diff --git a/vendor/github.com/docker/docker/docker/flags.go b/vendor/github.com/docker/docker/docker/flags.go new file mode 100644 index 00000000..fdff7b47 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/flags.go @@ -0,0 +1,30 @@ +package docker + +import ( + "sort" + + "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") +) + +type byName []cli.Command + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +var dockerCommands []cli.Command + +// TODO(tiborvass): do not show 'daemon' on client-only binaries + +func init() { + for _, cmd := range cli.DockerCommands { + dockerCommands = append(dockerCommands, cmd) + } + sort.Sort(byName(dockerCommands)) +} diff --git a/vendor/github.com/docker/docker/docker/listeners/listeners.go b/vendor/github.com/docker/docker/docker/listeners/listeners.go new file mode 100644 index 00000000..6ddac0da --- /dev/null +++ b/vendor/github.com/docker/docker/docker/listeners/listeners.go @@ -0,0 +1,19 @@ +package listeners + +import ( + "crypto/tls" + "net" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/sockets" +) + +func initTCPSocket(addr string, tlsConfig *tls.Config) (l net.Listener, err error) { + if tlsConfig == nil || tlsConfig.ClientAuth != tls.RequireAndVerifyClientCert { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + if l, err = sockets.NewTCPSocket(addr, tlsConfig); err != nil { + return nil, err + } + return +} diff --git a/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go b/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go new file mode 100644 index 00000000..548cb78e --- /dev/null +++ b/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. 
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "fd": + ls, err = listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + case "tcp": + l, err := initTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("No sockets found. Make sure the docker daemon was started by systemd.") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd address, should be number: %v", err) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("Too few socket activated files passed in") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file at fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + logrus.Errorf("Failed to close systemd activated file at fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/vendor/github.com/docker/docker/docker/runc.go b/vendor/github.com/docker/docker/docker/runc.go new file mode 100644 index 00000000..dccbccbb --- /dev/null +++ b/vendor/github.com/docker/docker/docker/runc.go @@ -0,0 +1,12 @@ +// +build !exclude_runc + +package docker + +import ( + "github.com/docker/docker/pkg/reexec" + "github.com/opencontainers/runc" +) + +func init() { + reexec.Register("docker-runc", runc.Main) +} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 00000000..d2a891c4 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,74 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + "golang.org/x/net/context" +) + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
+// In accordance with RFC 7231 (5.5.3) it is of the form:
+// [docker client's UA] UpstreamClient([upstream client's UA])
+func DockerUserAgent(ctx context.Context) string {
+ httpVersion := make([]useragent.VersionInfo, 0, 6)
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
+ if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
+ }
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
+
+ dockerUA := useragent.AppendVersions("", httpVersion...)
+ upstreamUA := getUserAgentFromContext(ctx)
+ if len(upstreamUA) > 0 {
+ ret := insertUpstreamUserAgent(upstreamUA, dockerUA)
+ return ret
+ }
+ return dockerUA
+}
+
+// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists
+func getUserAgentFromContext(ctx context.Context) string {
+ var upstreamUA string
+ if ctx != nil {
+ var ki interface{} = ctx.Value(httputils.UAStringKey)
+ if ki != nil {
+ upstreamUA = ctx.Value(httputils.UAStringKey).(string)
+ }
+ }
+ return upstreamUA
+}
+
+// escapeStr returns s with every rune in charsToEscape escaped by a backslash
+func escapeStr(s string, charsToEscape string) string {
+ var ret string
+ for _, currRune := range s {
+ appended := false
+ for _, escapeableRune := range charsToEscape {
+ if currRune == escapeableRune {
+ ret += `\` + string(currRune)
+ appended = true
+ break
+ }
+ }
+ if !appended {
+ ret += string(currRune)
+ }
+ }
+ return ret
+}
+
+// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent
+// string of the form:
+// $dockerUA UpstreamClient($upstreamUA)
+func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string {
+ charsToEscape := `();\`
+ upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape)
+ return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped)
+}
diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go
index 1d5862d3..6644bce2 100644
--- a/vendor/github.com/docker/docker/dockerversion/version_lib.go
+++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go
@@ -9,8 +9,5 @@ const (
 GitCommit string = "library-import"
 Version string = "library-import"
 BuildTime string = "library-import"
- IAmStatic string = "library-import"
- InitSHA1 string = "library-import"
- InitPath string = "library-import"
 )
diff --git a/vendor/github.com/docker/docker/errors/README.md b/vendor/github.com/docker/docker/errors/README.md
deleted file mode 100644
index 81fa04cc..00000000
--- a/vendor/github.com/docker/docker/errors/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-Docker 'errors' package
-=======================
-
-This package contains all of the error messages generated by the Docker
-engine that might be exposed via the Docker engine's REST API.
-
-Each top-level engine package will have its own file in this directory
-so that there's a clear grouping of errors, instead of just one big
-file.
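A quick worked example of the user-agent helpers above, with illustrative values: escapeStr backslash-escapes every rune in `();\`, so an upstream UA containing parentheses comes out as:

```
ua := insertUpstreamUserAgent(`compose (linux)`, "docker/1.11.2 go/go1.6.2")
// ua == `docker/1.11.2 go/go1.6.2 UpstreamClient(compose \(linux\))`
```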
The errors for each package are defined here instead of within -their respective package structure so that Docker CLI code that may need -to import these error definition files will not need to know or understand -the engine's package/directory structure. In other words, all they should -need to do is import `.../docker/errors` and they will automatically -pick up all Docker engine defined errors. This also gives the engine -developers the freedom to change the engine packaging structure (e.g. to -CRUD packages) without worrying about breaking existing clients. - -These errors are defined using the 'errcode' package. The `errcode` package -allows for each error to be typed and include all information necessary to -have further processing done on them if necessary. In particular, each error -includes: - -* Value - a unique string (in all caps) associated with this error. -Typically, this string is the same name as the variable name of the error -(w/o the `ErrorCode` text) but in all caps. - -* Message - the human readable sentence that will be displayed for this -error. It can contain '%s' substitutions that allows for the code generating -the error to specify values that will be inserted in the string prior to -being displayed to the end-user. The `WithArgs()` function can be used to -specify the insertion strings. Note, the evaluation of the strings will be -done at the time `WithArgs()` is called. - -* Description - additional human readable text to further explain the -circumstances of the error situation. - -* HTTPStatusCode - when the error is returned back to a CLI, this value -will be used to populate the HTTP status code. If not present the default -value will be `StatusInternalServerError`, 500. - -Not all errors generated within the engine's executable will be propagated -back to the engine's API layer. For example, it is expected that errors -generated by vendored code (under `docker/vendor`) and packaged code -(under `docker/pkg`) will be converted into errors defined by this package. - -When processing an errcode error, if you are looking for a particular -error then you can do something like: - -``` -import derr "github.com/docker/docker/errors" - -... - -err := someFunc() -if err.ErrorCode() == derr.ErrorCodeNoSuchContainer { - ... -} -``` diff --git a/vendor/github.com/docker/docker/errors/builder.go b/vendor/github.com/docker/docker/errors/builder.go deleted file mode 100644 index 38d0d3c3..00000000 --- a/vendor/github.com/docker/docker/errors/builder.go +++ /dev/null @@ -1,93 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker/builder component. - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeAtLeastOneArg is generated when the parser comes across a - // Dockerfile command that doesn't have any args. - ErrorCodeAtLeastOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ATLEASTONEARG", - Message: "%s requires at least one argument", - Description: "The specified command requires at least one argument", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExactlyOneArg is generated when the parser comes across a - // Dockerfile command that requires exactly one arg but got less/more. 
- ErrorCodeExactlyOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXACTLYONEARG", - Message: "%s requires exactly one argument", - Description: "The specified command requires exactly one argument", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeAtLeastTwoArgs is generated when the parser comes across a - // Dockerfile command that requires at least two args but got less. - ErrorCodeAtLeastTwoArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ATLEASTTWOARGS", - Message: "%s requires at least two arguments", - Description: "The specified command requires at least two arguments", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeTooManyArgs is generated when the parser comes across a - // Dockerfile command that has more args than it should - ErrorCodeTooManyArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TOOMANYARGS", - Message: "Bad input to %s, too many args", - Description: "The specified command was passed too many arguments", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeChainOnBuild is generated when the parser comes across a - // Dockerfile command that is trying to chain ONBUILD commands. - ErrorCodeChainOnBuild = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CHAINONBUILD", - Message: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", - Description: "ONBUILD Dockerfile commands aren't allow on ONBUILD commands", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadOnBuildCmd is generated when the parser comes across a - // an ONBUILD Dockerfile command with an invalid trigger/command. - ErrorCodeBadOnBuildCmd = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADONBUILDCMD", - Message: "%s isn't allowed as an ONBUILD trigger", - Description: "The specified ONBUILD command isn't allowed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeMissingFrom is generated when the Dockerfile is missing - // a FROM command. - ErrorCodeMissingFrom = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MISSINGFROM", - Message: "Please provide a source image with `from` prior to run", - Description: "The Dockerfile is missing a FROM command", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotOnWindows is generated when the specified Dockerfile - // command is not supported on Windows. - ErrorCodeNotOnWindows = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTONWINDOWS", - Message: "%s is not supported on Windows", - Description: "The specified Dockerfile command is not supported on Windows", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeEmpty is generated when the specified Volume string - // is empty. - ErrorCodeVolumeEmpty = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEEMPTY", - Message: "Volume specified can not be an empty string", - Description: "The specified volume can not be an empty string", - HTTPStatusCode: http.StatusInternalServerError, - }) -) diff --git a/vendor/github.com/docker/docker/errors/daemon.go b/vendor/github.com/docker/docker/errors/daemon.go deleted file mode 100644 index d406d271..00000000 --- a/vendor/github.com/docker/docker/errors/daemon.go +++ /dev/null @@ -1,951 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker/daemon component. 
- -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeNoSuchContainer is generated when we look for a container by - // name or ID and we can't find it. - ErrorCodeNoSuchContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSUCHCONTAINER", - Message: "no such id: %s", - Description: "The specified container can not be found", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeUnregisteredContainer is generated when we try to load - // a storage driver for an unregistered container - ErrorCodeUnregisteredContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNREGISTEREDCONTAINER", - Message: "Can't load storage driver for unregistered container %s", - Description: "An attempt was made to load the storage driver for a container that is not registered with the daemon", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeContainerBeingRemoved is generated when an attempt to start - // a container is made but its in the process of being removed, or is dead. - ErrorCodeContainerBeingRemoved = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CONTAINERBEINGREMOVED", - Message: "Container is marked for removal and cannot be started.", - Description: "An attempt was made to start a container that is in the process of being deleted", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnpauseContainer is generated when we attempt to stop a - // container but its paused. - ErrorCodeUnpauseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNPAUSECONTAINER", - Message: "Container %s is paused. Unpause the container before stopping", - Description: "The specified container is paused, before it can be stopped it must be unpaused", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodePausedContainer is generated when we attempt to attach a - // container but its paused. - ErrorCodePausedContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CONTAINERPAUSED", - Message: "Container %s is paused. Unpause the container before attach", - Description: "The specified container is paused, unpause the container before attach", - HTTPStatusCode: http.StatusConflict, - }) - // ErrorCodeAlreadyPaused is generated when we attempt to pause a - // container when its already paused. - ErrorCodeAlreadyPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ALREADYPAUSED", - Message: "Container %s is already paused", - Description: "The specified container is already in the paused state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotPaused is generated when we attempt to unpause a - // container when its not paused. - ErrorCodeNotPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTPAUSED", - Message: "Container %s is not paused", - Description: "The specified container can not be unpaused because it is not in a paused state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeImageUnregContainer is generated when we attempt to get the - // image of an unknown/unregistered container. 
- ErrorCodeImageUnregContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IMAGEUNREGCONTAINER", - Message: "Can't get image of unregistered container", - Description: "An attempt to retrieve the image of a container was made but the container is not registered", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyID is generated when an ID is the emptry string. - ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYID", - Message: "Invalid empty id", - Description: "An attempt was made to register a container but the container's ID can not be an empty string", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeLoggingFactory is generated when we could not load the - // log driver. - ErrorCodeLoggingFactory = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "LOGGINGFACTORY", - Message: "Failed to get logging factory: %v", - Description: "An attempt was made to register a container but the container's ID can not be an empty string", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInitLogger is generated when we could not initialize - // the logging driver. - ErrorCodeInitLogger = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INITLOGGER", - Message: "Failed to initialize logging driver: %v", - Description: "An error occurred while trying to initialize the logging driver", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotRunning is generated when we need to verify that - // a container is running, but its not. - ErrorCodeNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTRUNNING", - Message: "Container %s is not running", - Description: "The specified action can not be taken due to the container not being in a running state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeLinkNotRunning is generated when we try to link to a - // container that is not running. - ErrorCodeLinkNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "LINKNOTRUNNING", - Message: "Cannot link to a non running container: %s AS %s", - Description: "An attempt was made to link to a container but the container is not in a running state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeDeviceInfo is generated when there is an error while trying - // to get info about a custom device. - // container that is not running. - ErrorCodeDeviceInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DEVICEINFO", - Message: "error gathering device information while adding custom device %q: %s", - Description: "There was an error while trying to retrieve the information about a custom device", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyEndpoint is generated when the endpoint for a port - // map is nil. - ErrorCodeEmptyEndpoint = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYENDPOINT", - Message: "invalid endpoint while building port map info", - Description: "The specified endpoint for the port mapping is empty", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyNetwork is generated when the networkSettings for a port - // map is nil. 
- ErrorCodeEmptyNetwork = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYNETWORK", - Message: "invalid networksettings while building port map info", - Description: "The specified endpoint for the port mapping is empty", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeParsingPort is generated when there is an error parsing - // a "port" string. - ErrorCodeParsingPort = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PARSINGPORT", - Message: "Error parsing Port value(%v):%v", - Description: "There was an error while trying to parse the specified 'port' value", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoSandbox is generated when we can't find the specified - // sandbox(network) by ID. - ErrorCodeNoSandbox = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSANDBOX", - Message: "error locating sandbox id %s: %v", - Description: "There was an error trying to located the specified networking sandbox", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNetworkUpdate is generated when there is an error while - // trying update a network/sandbox config. - ErrorCodeNetworkUpdate = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORKUPDATE", - Message: "Update network failed: %v", - Description: "There was an error trying to update the configuration information of the specified network sandbox", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNetworkRefresh is generated when there is an error while - // trying refresh a network/sandbox config. - ErrorCodeNetworkRefresh = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORKREFRESH", - Message: "Update network failed: Failure in refresh sandbox %s: %v", - Description: "There was an error trying to refresh the configuration information of the specified network sandbox", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeHostPort is generated when there was an error while trying - // to parse a "host/port" string. - ErrorCodeHostPort = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "HOSTPORT", - Message: "Error parsing HostPort value(%s):%v", - Description: "There was an error trying to parse the specified 'HostPort' value", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNetworkConflict is generated when we try to publish a service - // in network mode. - ErrorCodeNetworkConflict = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORKCONFLICT", - Message: "conflicting options: publishing a service and network mode", - Description: "It is not possible to publish a service when it is in network mode", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeJoinInfo is generated when we failed to update a container's - // join info. - ErrorCodeJoinInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "JOININFO", - Message: "Updating join info failed: %v", - Description: "There was an error during an attempt update a container's join information", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeIPCRunning is generated when we try to join a container's - // IPC but its not running. 
- ErrorCodeIPCRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IPCRUNNING", - Message: "cannot join IPC of a non running container: %s", - Description: "An attempt was made to join the IPC of a container, but the container is not running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotADir is generated when we try to create a directory - // but the path isn't a dir. - ErrorCodeNotADir = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTADIR", - Message: "Cannot mkdir: %s is not a directory", - Description: "An attempt was made create a directory, but the location in which it is being created is not a directory", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeParseContainer is generated when the reference to a - // container doesn't include a ":" (another container). - ErrorCodeParseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PARSECONTAINER", - Message: "no container specified to join network", - Description: "The specified reference to a container is missing a ':' as a separator between 'container' and 'name'/'id'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeJoinSelf is generated when we try to network to ourselves. - ErrorCodeJoinSelf = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "JOINSELF", - Message: "cannot join own network", - Description: "An attempt was made to have a container join its own network", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeJoinRunning is generated when we try to network to ourselves. - ErrorCodeJoinRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "JOINRUNNING", - Message: "cannot join network of a non running container: %s", - Description: "An attempt to join the network of a container, but that container isn't running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeModeNotContainer is generated when we try to network to - // another container but the mode isn't 'container'. - ErrorCodeModeNotContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MODENOTCONTAINER", - Message: "network mode not set to container", - Description: "An attempt was made to connect to a container's network but the mode wasn't set to 'container'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRemovingVolume is generated when we try remove a mount - // point (volume) but fail. - ErrorCodeRemovingVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "REMOVINGVOLUME", - Message: "Error removing volumes:\n%v", - Description: "There was an error while trying to remove the mount point (volume) of a container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInvalidNetworkMode is generated when an invalid network - // mode value is specified. - ErrorCodeInvalidNetworkMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDNETWORKMODE", - Message: "invalid network mode: %s", - Description: "The specified networking mode is not valid", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeGetGraph is generated when there was an error while - // trying to find a graph/image. 
- ErrorCodeGetGraph = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "GETGRAPH", - Message: "Failed to graph.Get on ImageID %s - %s", - Description: "There was an error trying to retrieve the image for the specified image ID", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeGetLayer is generated when there was an error while - // trying to retrieve a particular layer of an image. - ErrorCodeGetLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "GETLAYER", - Message: "Failed to get layer path from graphdriver %s for ImageID %s - %s", - Description: "There was an error trying to retrieve the layer of the specified image", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodePutLayer is generated when there was an error while - // trying to 'put' a particular layer of an image. - ErrorCodePutLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PUTLAYER", - Message: "Failed to put layer path from graphdriver %s for ImageID %s - %s", - Description: "There was an error trying to store a layer for the specified image", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeGetLayerMetadata is generated when there was an error while - // trying to retrieve the metadata of a layer of an image. - ErrorCodeGetLayerMetadata = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "GETLAYERMETADATA", - Message: "Failed to get layer metadata - %s", - Description: "There was an error trying to retrieve the metadata of a layer for the specified image", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyConfig is generated when the input config data - // is empty. - ErrorCodeEmptyConfig = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYCONFIG", - Message: "Config cannot be empty in order to create a container", - Description: "While trying to create a container, the specified configuration information was empty", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoSuchImageHash is generated when we can't find the - // specified image by its hash - ErrorCodeNoSuchImageHash = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSUCHIMAGEHASH", - Message: "No such image: %s", - Description: "An attempt was made to find an image by its hash, but the lookup failed", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeNoSuchImageTag is generated when we can't find the - // specified image byt its name/tag. - ErrorCodeNoSuchImageTag = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSUCHIMAGETAG", - Message: "No such image: %s:%s", - Description: "An attempt was made to find an image by its name/tag, but the lookup failed", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeMountOverFile is generated when we try to mount a volume - // over an existing file (but not a dir). - ErrorCodeMountOverFile = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MOUNTOVERFILE", - Message: "cannot mount volume over existing file, file exists %s", - Description: "An attempt was made to mount a volume at the same location as a pre-existing file", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeMountSetup is generated when we can't define a mount point - // due to the source and destination being undefined. 
- ErrorCodeMountSetup = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MOUNTSETUP", - Message: "Unable to setup mount point, neither source nor volume defined", - Description: "An attempt was made to setup a mount point, but the source and destination are undefined", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeInvalidMode is generated when the mode of a volume/bind - // mount is invalid. - ErrorCodeVolumeInvalidMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEINVALIDMODE", - Message: "invalid mode: %q", - Description: "An invalid 'mode' was specified", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeInvalid is generated when the format fo the - // volume specification isn't valid. - ErrorCodeVolumeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEINVALID", - Message: "Invalid volume specification: '%s'", - Description: "An invalid 'volume' was specified in the mount request", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeAbs is generated when path to a volume isn't absolute. - ErrorCodeVolumeAbs = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEABS", - Message: "Invalid volume destination path: '%s' mount path must be absolute.", - Description: "An invalid 'destination' path was specified in the mount request, it must be an absolute path", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeName is generated when the name of named volume isn't valid. - ErrorCodeVolumeName = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUME_NAME_INVALID", - Message: "%q includes invalid characters for a local volume name, only %q are allowed", - Description: "The name of volume is invalid", - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeVolumeSlash is generated when destination path to a volume is / - ErrorCodeVolumeSlash = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMESLASH", - Message: "Invalid specification: destination can't be '/' in '%s'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeDestIsC is generated the destination is c: (Windows specific) - ErrorCodeVolumeDestIsC = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEDESTISC", - Message: "Destination drive letter in '%s' cannot be c:", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeDestIsCRoot is generated the destination path is c:\ (Windows specific) - ErrorCodeVolumeDestIsCRoot = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEDESTISCROOT", - Message: `Destination path in '%s' cannot be c:\`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeSourceNotFound is generated the source directory could not be found (Windows specific) - ErrorCodeVolumeSourceNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMESOURCENOTFOUND", - Message: "Source directory '%s' could not be found: %s", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeSourceNotDirectory is generated the source is not a directory (Windows specific) - ErrorCodeVolumeSourceNotDirectory = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMESOURCENOTDIRECTORY", - Message: "Source '%s' is not a directory", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeFromBlank is generated when path to a volume is blank. 
- ErrorCodeVolumeFromBlank = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEFROMBLANK", - Message: "malformed volumes-from specification: %q", - Description: "An invalid 'destination' path was specified in the mount request, it must not be blank", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeDup is generated when we try to mount two volumes - // to the same path. - ErrorCodeVolumeDup = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEDUP", - Message: "Duplicate bind mount '%s'", - Description: "An attempt was made to mount a volume but the specified destination location is already used in a previous mount", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeNoSourceForMount is generated when no source directory - // for a volume mount was found. (Windows specific) - ErrorCodeVolumeNoSourceForMount = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMENOSOURCEFORMOUNT", - Message: "No source for mount name '%s' driver %q destination '%s'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeNameReservedWord is generated when the name in a volume - // uses a reserved word for filenames. (Windows specific) - ErrorCodeVolumeNameReservedWord = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMENAMERESERVEDWORD", - Message: "Volume name %q cannot be a reserved word for Windows filenames", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantUnpause is generated when there's an error while trying - // to unpause a container. - ErrorCodeCantUnpause = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTUNPAUSE", - Message: "Cannot unpause container %s: %s", - Description: "An error occurred while trying to unpause the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodePSError is generated when trying to run 'ps'. - ErrorCodePSError = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PSError", - Message: "Error running ps: %s", - Description: "There was an error trying to run the 'ps' command in the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoPID is generated when looking for the PID field in the - // ps output. - ErrorCodeNoPID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOPID", - Message: "Couldn't find PID field in ps output", - Description: "There was no 'PID' field in the output of the 'ps' command that was executed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadPID is generated when we can't convert a PID to an int. - ErrorCodeBadPID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADPID", - Message: "Unexpected pid '%s': %s", - Description: "While trying to parse the output of the 'ps' command, the 'PID' field was not an integer", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoTop is generated when we try to run 'top' but can't - // because we're on windows. - ErrorCodeNoTop = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTOP", - Message: "Top is not supported on Windows", - Description: "The 'top' command is not supported on Windows", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeStopped is generated when we try to stop a container - // that is already stopped. 
- ErrorCodeStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "STOPPED", - Message: "Container already stopped", - Description: "An attempt was made to stop a container, but the container is already stopped", - HTTPStatusCode: http.StatusNotModified, - }) - - // ErrorCodeCantStop is generated when we try to stop a container - // but failed for some reason. - ErrorCodeCantStop = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTSTOP", - Message: "Cannot stop container %s: %s\n", - Description: "An error occurred while tring to stop the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadCPUFields is generated when the number of CPU fields is - // less than 8. - ErrorCodeBadCPUFields = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADCPUFIELDS", - Message: "invalid number of cpu fields", - Description: "While reading the '/proc/stat' file, the number of 'cpu' fields is less than 8", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadCPUInt is generated the CPU field can't be parsed as an int. - ErrorCodeBadCPUInt = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADCPUINT", - Message: "Unable to convert value %s to int: %s", - Description: "While reading the '/proc/stat' file, the 'CPU' field could not be parsed as an integer", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadStatFormat is generated the output of the stat info - // isn't parseable. - ErrorCodeBadStatFormat = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADSTATFORMAT", - Message: "invalid stat format", - Description: "There was an error trying to parse the '/proc/stat' file", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeTimedOut is generated when a timer expires. - ErrorCodeTimedOut = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TIMEDOUT", - Message: "Timed out: %v", - Description: "A timer expired", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeAlreadyRemoving is generated when we try to remove a - // container that is already being removed. - ErrorCodeAlreadyRemoving = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ALREADYREMOVING", - Message: "Status is already RemovalInProgress", - Description: "An attempt to remove a container was made, but the container is already in the process of being removed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeStartPaused is generated when we start a paused container. - ErrorCodeStartPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "STARTPAUSED", - Message: "Cannot start a paused container, try unpause instead.", - Description: "An attempt to start a container was made, but the container is paused. Unpause it first", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeAlreadyStarted is generated when we try to start a container - // that is already running. - ErrorCodeAlreadyStarted = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ALREADYSTARTED", - Message: "Container already started", - Description: "An attempt to start a container was made, but the container is already started", - HTTPStatusCode: http.StatusNotModified, - }) - - // ErrorCodeHostConfigStart is generated when a HostConfig is passed - // into the start command. 
- ErrorCodeHostConfigStart = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "HOSTCONFIGSTART", - Message: "Supplying a hostconfig on start is not supported. It should be supplied on create", - Description: "The 'start' command does not accept 'HostConfig' data, try using the 'create' command instead", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantRestart is generated when an error occurred while - // trying to restart a container. - ErrorCodeCantRestart = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTRESTART", - Message: "Cannot restart container %s: %s", - Description: "There was an error while trying to restart a container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyRename is generated when one of the names on a - // rename is empty. - ErrorCodeEmptyRename = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYRENAME", - Message: "Neither old nor new names may be empty", - Description: "An attempt was made to rename a container but either the old or new names were blank", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRenameTaken is generated when we try to rename but the - // new name isn't available. - ErrorCodeRenameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RENAMETAKEN", - Message: "Error when allocating new name: %s", - Description: "The new name specified on the 'rename' command is already being used", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRenameDelete is generated when we try to rename but - // failed trying to delete the old container. - ErrorCodeRenameDelete = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RENAMEDELETE", - Message: "Failed to delete container %q: %v", - Description: "There was an error trying to delete the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodePauseError is generated when we try to pause a container - // but failed. - ErrorCodePauseError = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PAUSEERROR", - Message: "Cannot pause container %s: %s", - Description: "There was an error trying to pause the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNeedStream is generated when we try to stream a container's - // logs but no output stream was specified. - ErrorCodeNeedStream = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NEEDSTREAM", - Message: "You must choose at least one stream", - Description: "While trying to stream a container's logs, no output stream was specified", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeDanglingOne is generated when we try to specify more than one - // 'dangling' specifier. - ErrorCodeDanglingOne = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DANLGINGONE", - Message: "Conflict: cannot use more than 1 value for `dangling` filter", - Description: "The specified 'dangling' filter may not have more than one value", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeImgDelUsed is generated when we try to delete an image - // but it is being used. 
- ErrorCodeImgDelUsed = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IMGDELUSED", - Message: "conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", - Description: "An attempt was made to delete an image but it is currently being used", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeImgNoParent is generated when we try to find an image's - // parent but its not in the graph. - ErrorCodeImgNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IMGNOPARENT", - Message: "unable to get parent image: %v", - Description: "There was an error trying to find an image's parent, it was not in the graph", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExportFailed is generated when an export fails. - ErrorCodeExportFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXPORTFAILED", - Message: "%s: %s", - Description: "There was an error during an export operation", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecResize is generated when we try to resize an exec - // but its not running. - ErrorCodeExecResize = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECRESIZE", - Message: "Exec %s is not running, so it can not be resized.", - Description: "An attempt was made to resize an 'exec', but the 'exec' is not running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeContainerNotRunning is generated when we try to get the info - // on an exec but the container is not running. - ErrorCodeContainerNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CONTAINERNOTRUNNING", - Message: "Container %s is not running: %s", - Description: "An attempt was made to retrieve the information about an 'exec' but the container is not running", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeNoExecID is generated when we try to get the info - // on an exec but it can't be found. - ErrorCodeNoExecID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOEXECID", - Message: "No such exec instance '%s' found in daemon", - Description: "The specified 'exec' instance could not be found", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeExecPaused is generated when we try to start an exec - // but the container is paused. - ErrorCodeExecPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECPAUSED", - Message: "Container %s is paused, unpause the container before exec", - Description: "An attempt to start an 'exec' was made, but the owning container is paused", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeExecRunning is generated when we try to start an exec - // but its already running. - ErrorCodeExecRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECRUNNING", - Message: "Error: Exec command %s is already running", - Description: "An attempt to start an 'exec' was made, but 'exec' is already running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecCantRun is generated when we try to start an exec - // but it failed for some reason. 
- ErrorCodeExecCantRun = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECCANTRUN", - Message: "Cannot run exec command %s in container %s: %s", - Description: "An attempt to start an 'exec' was made, but an error occurred", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecAttach is generated when we try to attach to an exec - // but failed. - ErrorCodeExecAttach = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECATTACH", - Message: "attach failed with error: %s", - Description: "There was an error while trying to attach to an 'exec'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecContainerStopped is generated when we try to start - // an exec but then the container stopped. - ErrorCodeExecContainerStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECCONTAINERSTOPPED", - Message: "container stopped while running exec", - Description: "An attempt was made to start an 'exec' but the owning container is in the 'stopped' state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeDefaultName is generated when we try to delete the - // default name of a container. - ErrorCodeDefaultName = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DEFAULTNAME", - Message: "Conflict, cannot remove the default name of the container", - Description: "An attempt to delete the default name of a container was made, but that is not allowed", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeNoParent is generated when we try to delete a container - // but we can't find its parent image. - ErrorCodeNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOPARENT", - Message: "Cannot get parent %s for name %s", - Description: "An attempt was made to delete a container but its parent image could not be found", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantDestroy is generated when we try to delete a container - // but failed for some reason. - ErrorCodeCantDestroy = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTDESTROY", - Message: "Cannot destroy container %s: %v", - Description: "An attempt was made to delete a container but it failed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmRunning is generated when we try to delete a container - // but its still running. - ErrorCodeRmRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMRUNNING", - Message: "Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f", - Description: "An attempt was made to delete a container but the container is still running, try to either stop it first or use '-f'", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeRmFailed is generated when we try to delete a container - // but it failed for some reason. - ErrorCodeRmFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMFAILED", - Message: "Could not kill running container, cannot remove - %v", - Description: "An error occurred while trying to delete a running container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmNotFound is generated when we try to delete a container - // but couldn't find it. 
- ErrorCodeRmNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMNOTFOUND", - Message: "Could not kill running container, cannot remove - %v", - Description: "An attempt to delete a container was made but the container could not be found", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmState is generated when we try to delete a container - // but couldn't set its state to RemovalInProgress. - ErrorCodeRmState = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMSTATE", - Message: "Failed to set container state to RemovalInProgress: %s", - Description: "An attempt to delete a container was made, but there as an error trying to set its state to 'RemovalInProgress'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmDriverFS is generated when we try to delete a container - // but the driver failed to delete its filesystem. - ErrorCodeRmDriverFS = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMDRIVERFS", - Message: "Driver %s failed to remove root filesystem %s: %s", - Description: "While trying to delete a container, the driver failed to remove the root filesystem", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmInit is generated when we try to delete a container - // but failed deleting its init filesystem. - ErrorCodeRmInit = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMINIT", - Message: "Driver %s failed to remove init filesystem %s: %s", - Description: "While trying to delete a container, the driver failed to remove the init filesystem", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmFS is generated when we try to delete a container - // but failed deleting its filesystem. - ErrorCodeRmFS = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMFS", - Message: "Unable to remove filesystem for %v: %v", - Description: "While trying to delete a container, the driver failed to remove the filesystem", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmExecDriver is generated when we try to delete a container - // but failed deleting its exec driver data. - ErrorCodeRmExecDriver = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMEXECDRIVER", - Message: "Unable to remove execdriver data for %s: %s", - Description: "While trying to delete a container, there was an error trying to remove th exec driver data", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmVolumeInUse is generated when we try to delete a container - // but failed deleting a volume because its being used. - ErrorCodeRmVolumeInUse = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMVOLUMEINUSE", - Message: "Conflict: %v", - Description: "While trying to delete a container, one of its volumes is still being used", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeRmVolume is generated when we try to delete a container - // but failed deleting a volume. - ErrorCodeRmVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMVOLUME", - Message: "Error while removing volume %s: %v", - Description: "While trying to delete a container, there was an error trying to delete one of its volumes", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInvalidCpusetCpus is generated when user provided cpuset CPUs - // are invalid. 
- ErrorCodeInvalidCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDCPUSETCPUS", - Message: "Invalid value %s for cpuset cpus.", - Description: "While verifying the container's 'HostConfig', CpusetCpus value was in an incorrect format", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInvalidCpusetMems is generated when user provided cpuset mems - // are invalid. - ErrorCodeInvalidCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDCPUSETMEMS", - Message: "Invalid value %s for cpuset mems.", - Description: "While verifying the container's 'HostConfig', CpusetMems value was in an incorrect format", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotAvailableCpusetCpus is generated when user provided cpuset - // CPUs aren't available in the container's cgroup. - ErrorCodeNotAvailableCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTAVAILABLECPUSETCPUS", - Message: "Requested CPUs are not available - requested %s, available: %s.", - Description: "While verifying the container's 'HostConfig', cpuset CPUs provided aren't available in the container's cgroup available set", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotAvailableCpusetMems is generated when user provided cpuset - // memory nodes aren't available in the container's cgroup. - ErrorCodeNotAvailableCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTAVAILABLECPUSETMEMS", - Message: "Requested memory nodes are not available - requested %s, available: %s.", - Description: "While verifying the container's 'HostConfig', cpuset memory nodes provided aren't available in the container's cgroup available set", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorVolumeNameTaken is generated when an error occurred while - // trying to create a volume that has existed using different driver. - ErrorVolumeNameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUME_NAME_TAKEN", - Message: "A volume name %s already exists with the %s driver. 
Choose a different volume name.", - Description: "An attempt to create a volume using a driver but the volume already exists with a different driver", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCmdNotFound is generated when container cmd can't start, - // container command not found error, exit code 127 - ErrorCodeCmdNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CMDNOTFOUND", - Message: "Container command not found or does not exist.", - Description: "Command could not be found, command does not exist", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCmdCouldNotBeInvoked is generated when container cmd can't start, - // container command permission denied error, exit code 126 - ErrorCodeCmdCouldNotBeInvoked = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CMDCOULDNOTBEINVOKED", - Message: "Container command could not be invoked.", - Description: "Permission denied, cannot invoke command", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantStart is generated when container cmd can't start, - // for any reason other than above 2 errors - ErrorCodeCantStart = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTSTART", - Message: "Cannot start container %s: %s", - Description: "There was an error while trying to start a container", - HTTPStatusCode: http.StatusInternalServerError, - }) -) diff --git a/vendor/github.com/docker/docker/errors/error.go b/vendor/github.com/docker/docker/errors/error.go deleted file mode 100644 index 37222d44..00000000 --- a/vendor/github.com/docker/docker/errors/error.go +++ /dev/null @@ -1,6 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker engine but are not tied to any specific top-level component. - -const errGroup = "engine" diff --git a/vendor/github.com/docker/docker/errors/errors.go b/vendor/github.com/docker/docker/errors/errors.go new file mode 100644 index 00000000..8070f48f --- /dev/null +++ b/vendor/github.com/docker/docker/errors/errors.go @@ -0,0 +1,41 @@ +package errors + +import "net/http" + +// apiError is an error wrapper that also +// holds information about response status codes. +type apiError struct { + error + statusCode int +} + +// HTTPErrorStatusCode returns a status code. +func (e apiError) HTTPErrorStatusCode() int { + return e.statusCode +} + +// NewErrorWithStatusCode allows you to associate +// a specific HTTP Status Code to an error. +// The Server will take that code and set +// it as the response status. +func NewErrorWithStatusCode(err error, code int) error { + return apiError{err, code} +} + +// NewBadRequestError creates a new API error +// that has the 400 HTTP status code associated to it. +func NewBadRequestError(err error) error { + return NewErrorWithStatusCode(err, http.StatusBadRequest) +} + +// NewRequestNotFoundError creates a new API error +// that has the 404 HTTP status code associated to it. +func NewRequestNotFoundError(err error) error { + return NewErrorWithStatusCode(err, http.StatusNotFound) +} + +// NewRequestConflictError creates a new API error +// that has the 409 HTTP status code associated to it. 
+func NewRequestConflictError(err error) error { + return NewErrorWithStatusCode(err, http.StatusConflict) +} diff --git a/vendor/github.com/docker/docker/errors/image.go b/vendor/github.com/docker/docker/errors/image.go deleted file mode 100644 index 04efe6fc..00000000 --- a/vendor/github.com/docker/docker/errors/image.go +++ /dev/null @@ -1,20 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker/image component. - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeInvalidImageID is generated when image id specified is incorrectly formatted. - ErrorCodeInvalidImageID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDIMAGEID", - Message: "image ID '%s' is invalid ", - Description: "The specified image id is incorrectly formatted", - HTTPStatusCode: http.StatusInternalServerError, - }) -) diff --git a/vendor/github.com/docker/docker/errors/server.go b/vendor/github.com/docker/docker/errors/server.go deleted file mode 100644 index 1a7af00a..00000000 --- a/vendor/github.com/docker/docker/errors/server.go +++ /dev/null @@ -1,36 +0,0 @@ -package errors - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeNewerClientVersion is generated when a request from a client - // specifies a higher version than the server supports. - ErrorCodeNewerClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NEWERCLIENTVERSION", - Message: "client is newer than server (client API version: %s, server API version: %s)", - Description: "The client version is higher than the server version", - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeOldClientVersion is generated when a request from a client - // specifies a version lower than the minimum version supported by the server. - ErrorCodeOldClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "OLDCLIENTVERSION", - Message: "client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", - Description: "The client version is too old for the server", - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorNetworkControllerNotEnabled is generated when the networking stack in not enabled - // for certain platforms, like windows. 
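The replacement errors package above drops the per-component registry entirely: any error can be tagged with an HTTP status and unwrapped by the API layer. A sketch of the consuming side, assuming only the constructors added above; the httpStatusError interface name here is made up (its one method is the HTTPErrorStatusCode defined on apiError):

```
package main

import (
	"errors"
	"fmt"
	"net/http"

	derr "github.com/docker/docker/errors"
)

// The shape a server can check for; only the method comes from the
// apiError type above, the interface name is illustrative.
type httpStatusError interface {
	error
	HTTPErrorStatusCode() int
}

func main() {
	err := derr.NewRequestNotFoundError(errors.New("no such container: abc123"))

	status := http.StatusInternalServerError // default when no code is attached
	if se, ok := err.(httpStatusError); ok {
		status = se.HTTPErrorStatusCode()
	}
	fmt.Println(status, err) // 404 no such container: abc123
}
```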
- ErrorNetworkControllerNotEnabled = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORK_CONTROLLER_NOT_ENABLED", - Message: "the network controller is not enabled for this platform", - Description: "Docker's networking stack is disabled for this platform", - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go new file mode 100644 index 00000000..72c9ab42 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fs.go @@ -0,0 +1,184 @@ +package image + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" +) + +// IDWalkFunc is function called by StoreBackend.Walk +type IDWalkFunc func(id ID) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f IDWalkFunc) error + Get(id ID) ([]byte, error) + Set(data []byte) (ID, error) + Delete(id ID) error + SetMetadata(id ID, key string, data []byte) error + GetMetadata(id ID, key string) ([]byte, error) + DeleteMetadata(id ID, key string) error +} + +// fs implements StoreBackend using the filesystem. +type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + return s, nil +} + +func (s *fs) contentFile(id ID) string { + dgst := digest.Digest(id) + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(id ID) string { + dgst := digest.Digest(id) + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f IDWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(ID(dgst)); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given ID. +func (s *fs) Get(id ID) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(id) +} + +func (s *fs) get(id ID) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(id)) + if err != nil { + return nil, err + } + + // todo: maybe optional + if ID(digest.FromBytes(content)) != id { + return nil, fmt.Errorf("failed to verify image: %v", id) + } + + return content, nil +} + +// Set stores content under a given ID. 
+func (s *fs) Set(data []byte) (ID, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("Invalid empty data") + } + + id := ID(digest.FromBytes(data)) + filePath := s.contentFile(id) + tempFilePath := s.contentFile(id) + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return "", err + } + if err := os.Rename(tempFilePath, filePath); err != nil { + return "", err + } + + return id, nil +} + +// Delete removes content and metadata files associated with the ID. +func (s *fs) Delete(id ID) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(id)); err != nil { + return err + } + if err := os.Remove(s.contentFile(id)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(id ID, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(id); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(id)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + filePath := filepath.Join(s.metadataDir(id), key) + tempFilePath := filePath + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return err + } + return os.Rename(tempFilePath, filePath) +} + +// GetMetadata returns metadata for a given ID. +func (s *fs) GetMetadata(id ID, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(id); err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key)) +} + +// DeleteMetadata removes the metadata associated with an ID. +func (s *fs) DeleteMetadata(id ID, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(id), key)) +} diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go index 89799160..52471408 100644 --- a/vendor/github.com/docker/docker/image/image.go +++ b/vendor/github.com/docker/docker/image/image.go @@ -2,36 +2,23 @@ package image import ( "encoding/json" - "fmt" - "regexp" + "errors" + "io" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/version" - "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types/container" ) -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) +// ID is the content-addressable ID of an image. +type ID digest.Digest -// noFallbackMinVersion is the minimum version for which v1compatibility -// information will not be marshaled through the Image struct to remove -// blank fields. -var noFallbackMinVersion = version.Version("1.8.3") - -// Descriptor provides the information necessary to register an image in -// the graph. -type Descriptor interface { - ID() string - Parent() string - MarshalConfig() ([]byte, error) +func (id ID) String() string { + return digest.Digest(id).String() } -// Image stores the image configuration. -// All fields in this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Image struct { +// V1Image stores the V1 image configuration. 
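A round trip through the new filesystem backend ties the pieces together. This sketch uses NewFSStoreBackend and the StoreBackend methods exactly as added above; the temp directory, payload, and metadata key are invented:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/docker/docker/image"
)

func main() {
	root, err := ioutil.TempDir("", "image-store")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(root)

	// Content lands in <root>/content/sha256/<hex>,
	// metadata in <root>/metadata/sha256/<hex>/<key>.
	store, err := image.NewFSStoreBackend(root)
	if err != nil {
		log.Fatal(err)
	}

	id, err := store.Set([]byte(`{"architecture":"amd64","os":"linux"}`))
	if err != nil {
		log.Fatal(err) // empty data or a write failure
	}
	if err := store.SetMetadata(id, "lastUpdated", []byte("2016-06-01")); err != nil {
		log.Fatal(err) // fails if the content file is missing
	}

	data, err := store.Get(id) // content is re-verified against the digest
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id, string(data))

	// Walk visits every stored ID.
	_ = store.Walk(func(id image.ID) error {
		fmt.Println("seen:", id)
		return nil
	})
}
```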
+type V1Image struct { // ID a unique 64 character identifier of the image ID string `json:"id,omitempty"` // Parent id of the image @@ -42,108 +29,110 @@ type Image struct { Created time.Time `json:"created"` // Container is the id of the container used to commit Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig runconfig.Config `json:"container_config,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` // DockerVersion specifies version on which image is built DockerVersion string `json:"docker_version,omitempty"` // Author of the image Author string `json:"author,omitempty"` // Config is the configuration of the container received from the client - Config *runconfig.Config `json:"config,omitempty"` + Config *container.Config `json:"config,omitempty"` // Architecture is the hardware that the image is build and runs on Architecture string `json:"architecture,omitempty"` // OS is the operating system used to build and run the image OS string `json:"os,omitempty"` // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` // capitalized for backwards compatibility - // ParentID specifies the strong, content address of the parent configuration. - ParentID digest.Digest `json:"parent_id,omitempty"` - // LayerID provides the content address of the associated layer. - LayerID digest.Digest `json:"layer_id,omitempty"` + Size int64 `json:",omitempty"` } -// NewImgJSON creates an Image configuration from json. -func NewImgJSON(src []byte) (*Image, error) { - ret := &Image{} +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` - // FIXME: Is there a cleaner way to "purify" the input json? - if err := json.Unmarshal(src, ret); err != nil { - return nil, err - } - return ret, nil + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID } -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return derr.ErrorCodeInvalidImageID.WithArgs(id) - } - return nil +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON } -// MakeImageConfig returns immutable configuration JSON for image based on the -// v1Compatibility object, layer digest and parent StrongID. SHA256() of this -// config is the new image ID (strongID). -func MakeImageConfig(v1Compatibility []byte, layerID, parentID digest.Digest) ([]byte, error) { +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} - // Detect images created after 1.8.3 - img, err := NewImgJSON(v1Compatibility) +// ImageID stringizes ID. +func (img *Image) ImageID() string { + return string(img.ID()) +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// MarshalJSON serializes the image to JSON. 
It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) if err != nil { return nil, err } - useFallback := version.Version(img.DockerVersion).LessThan(noFallbackMinVersion) - - if useFallback { - // Fallback for pre-1.8.3. Calculate base config based on Image struct - // so that fields with default values added by Docker will use same ID - logrus.Debugf("Using fallback hash for %v", layerID) - - v1Compatibility, err = json.Marshal(img) - if err != nil { - return nil, err - } - } var c map[string]*json.RawMessage - if err := json.Unmarshal(v1Compatibility, &c); err != nil { + if err := json.Unmarshal(pass1, &c); err != nil { return nil, err } - - if err := layerID.Validate(); err != nil { - return nil, fmt.Errorf("invalid layerID: %v", err) - } - - c["layer_id"] = rawJSON(layerID) - - if parentID != "" { - if err := parentID.Validate(); err != nil { - return nil, fmt.Errorf("invalid parentID %v", err) - } - c["parent_id"] = rawJSON(parentID) - } - - delete(c, "id") - delete(c, "parent") - delete(c, "Size") // Size is calculated from data on disk and is inconsitent - return json.Marshal(c) } -// StrongID returns image ID for the config JSON. -func StrongID(configJSON []byte) (digest.Digest, error) { - digester := digest.Canonical.New() - if _, err := digester.Hash().Write(configJSON); err != nil { - return "", err - } - dgst := digester.Digest() - logrus.Debugf("H(%v) = %v", string(configJSON), dgst) - return dgst, nil +// History stores build commands that were used to create an image +type History struct { + // Created timestamp for build point + Created time.Time `json:"created"` + // Author of the build point + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building image. + CreatedBy string `json:"created_by,omitempty"` + // Comment is custom message set by the user when creating the image. + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` } -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) +// Exporter provides interface for exporting and importing images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. 
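+// Configs without a rootfs section are rejected, so only current-format
+// images can be constructed this way.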
+func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("Invalid image JSON, no RootFS key.") + } + + img.rawJSON = src + + return img, nil } diff --git a/vendor/github.com/docker/docker/image/image_test.go b/vendor/github.com/docker/docker/image/image_test.go deleted file mode 100644 index 77d92c44..00000000 --- a/vendor/github.com/docker/docker/image/image_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package image - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/docker/distribution/digest" -) - -var fixtures = []string{ - "fixtures/pre1.9", - "fixtures/post1.9", -} - -func loadFixtureFile(t *testing.T, path string) []byte { - fileData, err := ioutil.ReadFile(path) - if err != nil { - t.Fatalf("error opening %s: %v", path, err) - } - - return bytes.TrimSpace(fileData) -} - -// TestMakeImageConfig makes sure that MakeImageConfig returns the expected -// canonical JSON for a reference Image. -func TestMakeImageConfig(t *testing.T) { - for _, fixture := range fixtures { - v1Compatibility := loadFixtureFile(t, fixture+"/v1compatibility") - expectedConfig := loadFixtureFile(t, fixture+"/expected_config") - layerID := digest.Digest(loadFixtureFile(t, fixture+"/layer_id")) - parentID := digest.Digest(loadFixtureFile(t, fixture+"/parent_id")) - - json, err := MakeImageConfig(v1Compatibility, layerID, parentID) - if err != nil { - t.Fatalf("MakeImageConfig on %s returned error: %v", fixture, err) - } - if !bytes.Equal(json, expectedConfig) { - t.Fatalf("did not get expected JSON for %s\nexpected: %s\ngot: %s", fixture, expectedConfig, json) - } - } -} - -// TestGetStrongID makes sure that GetConfigJSON returns the expected -// hash for a reference Image. -func TestGetStrongID(t *testing.T) { - for _, fixture := range fixtures { - expectedConfig := loadFixtureFile(t, fixture+"/expected_config") - expectedComputedID := digest.Digest(loadFixtureFile(t, fixture+"/expected_computed_id")) - - if id, err := StrongID(expectedConfig); err != nil || id != expectedComputedID { - t.Fatalf("did not get expected ID for %s\nexpected: %s\ngot: %s\nerror: %v", fixture, expectedComputedID, id, err) - } - } -} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go new file mode 100644 index 00000000..b546696d --- /dev/null +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -0,0 +1,8 @@ +package image + +import "github.com/docker/docker/layer" + +// Append appends a new diffID to rootfs +func (r *RootFS) Append(id layer.DiffID) { + r.DiffIDs = append(r.DiffIDs, id) +} diff --git a/vendor/github.com/docker/docker/image/rootfs_unix.go b/vendor/github.com/docker/docker/image/rootfs_unix.go new file mode 100644 index 00000000..99c1f8f4 --- /dev/null +++ b/vendor/github.com/docker/docker/image/rootfs_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package image + +import "github.com/docker/docker/layer" + +// RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` +} + +// ChainID returns the ChainID for the top layer in RootFS. 
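+// For DiffIDs [d1, d2, d3] the chain is folded pairwise (see layer.CreateChainID):
+//   chainID(d1)        = d1
+//   chainID(d1,d2)     = digest(chainID(d1) + " " + d2)
+//   chainID(d1,d2,d3)  = digest(chainID(d1,d2) + " " + d3)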
+func (r *RootFS) ChainID() layer.ChainID { + return layer.CreateChainID(r.DiffIDs) +} + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: "layers"} +} diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go new file mode 100644 index 00000000..92ac438d --- /dev/null +++ b/vendor/github.com/docker/docker/image/store.go @@ -0,0 +1,295 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. +type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.Mutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digest.Set +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digest.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(id ID) error { + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image %v, %v", id, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(digest.Digest(id)); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[ID(id)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
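+	// Concretely: there may not be more non-empty history entries than rootfs diffIDs.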
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := ID(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", err + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(digest.Digest(imageID)); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + is.Lock() + defer is.Unlock() + + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digest.ErrDigestNotFound { + err = fmt.Errorf("No such image: %s", term) + } + return "", err + } + return ID(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id, "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(digest.Digest(id)); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id, "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id, "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? 
+} + +func (is *store) Children(id ID) []ID { + is.Lock() + defer is.Unlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.Lock() + defer is.Unlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/vendor/github.com/docker/docker/image/tarexport/load.go b/vendor/github.com/docker/docker/image/tarexport/load.go new file mode 100644 index 00000000..42eaa40b --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/load.go @@ -0,0 +1,372 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/reference" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + progressOutput progress.Output + ) + if !quiet { + progressOutput = sf.NewProgressOutput(outStream, false) + outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} + } + + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream, progressOutput) + } + return manifestFile.Close() + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + var parentLinks []parentLink + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, 
diffID.String(), progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID, outStream) + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID, img.ID) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, progressOutput progress.Output) (layer.Layer, error) { + rawTar, err := os.Open(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + inflatedLayerData, err := archive.DecompressStream(rawTar) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if progressOutput != nil { + fileInfo, err := os.Stat(filename) + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + progressReader := progress.NewProgressReader(inflatedLayerData, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + + return l.ls.Register(progressReader, rootFS.ChainID()) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + return repositoriesFile.Close() + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range 
repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID, outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/image/tarexport/save.go b/vendor/github.com/docker/docker/image/tarexport/save.go new file mode 100644 index 00000000..9ec3cc9f --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/save.go @@ -0,0 +1,319 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution/digest" + 
"github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Canonical); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + id, ref, err := reference.ParseIDOrReference(name) + if err != nil { + return nil, err + } + if id != "" { + _, err := l.is.Get(image.ID(id)) + if err != nil { + return nil, err + } + addAssoc(image.ID(id), nil) + continue + } + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + continue + } + if reference.IsNameOnly(ref) { + assocs := l.rs.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(assoc.ImageID, assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + var imgID image.ID + if imgID, err = l.rs.Get(ref); err != nil { + return nil, err + } + addAssoc(imgID, ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + if err = s.saveImage(id); err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: digest.Digest(id).Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, parentID}) + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) 
> 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(manifest); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) error { + img, err := s.is.Get(id) + if err != nil { + return err + } + + if len(img.RootFS.DiffIDs) == 0 { + return fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{} + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil { + return err + } + layers = append(layers, v1Img.ID) + parent = v1ID + } + + configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return err + } + if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { + return err + } + + s.images[id].layers = layers + return nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return err + } + + // serialize filesystem + tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName)) + if err != nil { + return err + } + defer tarFile.Close() + + l, err := s.ls.Get(id) + if err != nil { + return err + } + defer layer.ReleaseAndLog(s.ls, l) + + arch, err := l.TarStream() + if err != nil { + return err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? 
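+		// Pinning mtimes to the image's created time helps keep repeated saves byte-for-byte reproducible.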
+ if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return err + } + } + + s.savedLayers[legacyImg.ID] = struct{}{} + return nil +} diff --git a/vendor/github.com/docker/docker/image/tarexport/tarexport.go b/vendor/github.com/docker/docker/image/tarexport/tarexport.go new file mode 100644 index 00000000..5e208777 --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/tarexport.go @@ -0,0 +1,37 @@ +package tarexport + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent image.ID `json:",omitempty"` +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs reference.Store +} + +// NewTarExporter returns new ImageExporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + } +} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go new file mode 100644 index 00000000..e27ebd4c --- /dev/null +++ b/vendor/github.com/docker/docker/image/v1/imagev1.go @@ -0,0 +1,148 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/version" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = version.Version("1.8.3") + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. +func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. 
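+// V1-only fields such as id, parent, layer_id and throwaway are stripped, and
+// the supplied rootfs and history are spliced in before re-marshaling.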
+func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go new file mode 100644 index 00000000..5e1cb184 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/empty.go @@ -0,0 +1,48 @@ +package layer + +import ( + "archive/tar" + "bytes" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. 
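+// Both its DiffID and ChainID are DigestSHA256EmptyTar.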
+var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go new file mode 100644 index 00000000..a0044b36 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,326 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + root string +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. +func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + + td, err := ioutil.TempDir(tmpDir, "layer-") + if err != nil { + return nil, err + } + // Create a new tempdir + return &fileMetadataTransaction{ + store: fms, + root: td, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return 
ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + return os.Rename(fm.root, finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return os.RemoveAll(fm.root) +} + +func (fm *fileMetadataTransaction) String() string { + return fm.root +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) 
GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go new file mode 100644 index 00000000..ad01e89a --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -0,0 +1,262 @@ +// Package layer is package for managing read-only +// and read-write mounts on the union file system +// driver. Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer + +import ( + "errors" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/archive" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. + ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. 
+	ErrLayerNotRetained = errors.New("layer not retained")
+
+	// ErrMountDoesNotExist is used when an operation is
+	// attempted on a mount layer which does not exist.
+	ErrMountDoesNotExist = errors.New("mount does not exist")
+
+	// ErrMountNameConflict is used when a mount is attempted
+	// to be created but there is already a mount with the name
+	// used for creation.
+	ErrMountNameConflict = errors.New("mount already exists with name")
+
+	// ErrActiveMount is used when an operation on a
+	// mount is attempted but the layer is still
+	// mounted and the operation cannot be performed.
+	ErrActiveMount = errors.New("mount still active")
+
+	// ErrNotMounted is used when requesting an active
+	// mount but the layer is not mounted.
+	ErrNotMounted = errors.New("not mounted")
+
+	// ErrMaxDepthExceeded is used when a layer is attempted
+	// to be created which would result in a layer depth
+	// greater than the 125 max.
+	ErrMaxDepthExceeded = errors.New("max depth exceeded")
+
+	// ErrNotSupported is used when the action is not supported
+	// on the current platform
+	ErrNotSupported = errors.New("not supported on this platform")
+)
+
+// ChainID is the content-addressable ID of a layer.
+type ChainID digest.Digest
+
+// String returns a string rendition of a layer ID
+func (id ChainID) String() string {
+	return string(id)
+}
+
+// DiffID is the hash of an individual layer tar.
+type DiffID digest.Digest
+
+// String returns a string rendition of a layer DiffID
+func (diffID DiffID) String() string {
+	return string(diffID)
+}
+
+// TarStreamer represents an object which may
+// have its contents exported as a tar stream.
+type TarStreamer interface {
+	// TarStream returns a tar archive stream
+	// for the contents of a layer.
+	TarStream() (io.ReadCloser, error)
+}
+
+// Layer represents a read-only layer
+type Layer interface {
+	TarStreamer
+
+	// ChainID returns the content hash of the entire layer chain. The hash
+	// chain is made up of DiffID of top layer and all of its parents.
+	ChainID() ChainID
+
+	// DiffID returns the content hash of the layer
+	// tar stream used to create this layer.
+	DiffID() DiffID
+
+	// Parent returns the next layer in the layer chain.
+	Parent() Layer
+
+	// Size returns the size of the entire layer chain. The size
+	// is calculated from the total size of all files in the layers.
+	Size() (int64, error)
+
+	// DiffSize returns the size difference of the top layer
+	// from parent layer.
+	DiffSize() (int64, error)
+
+	// Metadata returns the low level storage metadata associated
+	// with layer.
+	Metadata() (map[string]string, error)
+}
+
+// RWLayer represents a layer which is
+// readable and writable
+type RWLayer interface {
+	TarStreamer
+
+	// Name of mounted layer
+	Name() string
+
+	// Parent returns the layer which the writable
+	// layer was created from.
+	Parent() Layer
+
+	// Mount mounts the RWLayer and returns the filesystem path
+	// to the writable layer.
+	Mount(mountLabel string) (string, error)
+
+	// Unmount unmounts the RWLayer. This should be called
+	// for every mount. If there are multiple mount calls
+	// this operation will only decrement the internal mount counter.
+	Unmount() error
+
+	// Size represents the size of the writable layer
+	// as calculated by the total size of the files
+	// changed in the mutable layer.
+	Size() (int64, error)
+
+	// Changes returns the set of changes for the mutable layer
+	// from the base layer.
+ Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReinitRWLayer(l RWLayer) error + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. + StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referenced + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) 
+} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go new file mode 100644 index 00000000..73a1b34c --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -0,0 +1,666 @@ +package layer + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. +const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + StorePath string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New( + options.StorePath, + options.GraphDriver, + options.GraphDriverOptions, + options.UIDMaps, + options.GIDMaps) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) + if err != nil { + return nil, err + } + + return NewStoreFromGraphDriver(fms, driver) +} + +// NewStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. 
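+// Layers and mounts recorded by the metadata store are reloaded eagerly;
+// entries that fail to load are skipped with a debug log.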
+func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.New() + tr := io.TeeReader(ts, digester.Hash()) + + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr)) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an 
error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + } + + if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func 
(ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc) + if err != nil { + return nil, err + } + m.initID = pid + } + + if err = ls.driver.Create(m.mountID, pid, ""); err != nil { + return nil, err + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +// ReinitRWLayer reinitializes a given mount to the layerstore, specifically +// initializing the usage count. It should strictly only be used in the +// daemon's restore path to restore state of live containers. 
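One subtlety in Register above deserves a note before ReinitRWLayer below: err there is a plain local variable captured by the deferred cleanups, not a named return, so the "layer already exists" branch can set err to trigger rollback while still returning the existing layer as a success. A stripped-down sketch of that pattern (all names hypothetical):

package main

import (
	"errors"
	"fmt"
)

var cache = map[string]string{"id1": "cached"}

func register(id string) (string, error) {
	var err error // local, not a named return: defers still see it after "return x, nil"
	defer func() {
		if err != nil {
			fmt.Println("rolling back partially created state for", id)
		}
	}()

	if existing, ok := cache[id]; ok {
		// Set err so the deferred rollback runs, but report success to the caller.
		err = errors.New("already exists")
		return existing, nil
	}
	cache[id] = "fresh"
	return cache[id], nil
}

func main() {
	v, err := register("id1")
	fmt.Println(v, err) // prints "cached <nil>" after the rollback message
}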
+func (ls *layerStore) ReinitRWLayer(l RWLayer) error { + ls.mountL.Lock() + defer ls.mountL.Unlock() + + m, ok := ls.mounts[l.Name()] + if !ok { + return ErrMountDoesNotExist + } + + if err := m.incActivityCount(l); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. 
+ initID := fmt.Sprintf("%s-init", graphID) + + if err := ls.driver.Create(initID, parent, mountLabel); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil +} diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 00000000..d77e2fc6 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd + +package layer + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go new file mode 100644 index 00000000..b45c3109 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -0,0 +1,256 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. 
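Before the migration helpers that follow, here is how a hypothetical caller would tie the Store API above together to create a container's writable layer; the helper, its identifiers, and the seeded file are illustrative only, not part of this diff:

package main

import (
	"io/ioutil"
	"path/filepath"

	"github.com/docker/docker/layer"
)

// createContainerLayer is a hypothetical helper, not part of this diff.
func createContainerLayer(ls layer.Store, image layer.Layer) (layer.RWLayer, string, error) {
	rw, err := ls.CreateRWLayer("container-123", image.ChainID(), "", func(root string) error {
		// The MountInit callback runs against the "-init" layer, so files
		// seeded here are excluded from the container's exported tar stream.
		return ioutil.WriteFile(filepath.Join(root, "etc", "hostname"), []byte("rancher\n"), 0644)
	})
	if err != nil {
		return nil, "", err
	}
	rootfs, err := rw.Mount("") // empty mount label when SELinux is not in use
	if err != nil {
		return nil, "", err
	}
	return rw, rootfs, nil
}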
+func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + if err = ls.saveMount(m); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.New() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). 
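ChecksumForGraphID above leans on a Go idiom that is easy to miss: because diffID, size and err are named returns, the deferred closure can observe a failure and overwrite the results with the no-tar-split fallback. The same mechanism in isolation (fallback values hypothetical):

package main

import (
	"errors"
	"fmt"
)

func checksum(haveTarSplit bool) (diffID string, err error) {
	// Named returns let this defer rewrite the result on failure.
	defer func() {
		if err != nil {
			diffID, err = "sha256:recomputed-from-driver-diff", nil
		}
	}()

	if !haveTarSplit {
		err = errors.New("no tar-split file")
		return
	}
	return "sha256:from-tar-split", nil
}

func main() {
	fmt.Println(checksum(true))  // sha256:from-tar-split <nil>
	fmt.Println(checksum(false)) // sha256:recomputed-from-driver-diff <nil>
}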
+ var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 00000000..5a07fd08 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,188 @@ +package layer + +import ( + "io" + "sync" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) + if err != nil { + return nil, err + } + return archiver, nil +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
+ return nil +} + +func (ml *mountedLayer) Mount(mountLabel string) (string, error) { + return ml.layerStore.driver.Get(ml.mountID, mountLabel) +} + +func (ml *mountedLayer) Unmount() error { + return ml.layerStore.driver.Put(ml.mountID) +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) incActivityCount(ref RWLayer) error { + rl, ok := ml.references[ref] + if !ok { + return ErrLayerNotRetained + } + + if err := rl.acquire(); err != nil { + return err + } + return nil +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + rl, ok := ml.references[ref] + if !ok { + return ErrLayerNotRetained + } + + if err := rl.release(); err != nil { + return err + } + delete(ml.references, ref) + + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ref.activityCount = 0 + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer + + activityL sync.Mutex + activityCount int +} + +func (rl *referencedRWLayer) acquire() error { + rl.activityL.Lock() + defer rl.activityL.Unlock() + + rl.activityCount++ + + return nil +} + +func (rl *referencedRWLayer) release() error { + rl.activityL.Lock() + defer rl.activityL.Unlock() + + if rl.activityCount > 0 { + return ErrActiveMount + } + + rl.activityCount = -1 + + return nil +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { + rl.activityL.Lock() + defer rl.activityL.Unlock() + + if rl.activityCount == -1 { + return "", ErrLayerNotRetained + } + + if rl.activityCount > 0 { + rl.activityCount++ + return rl.path, nil + } + + m, err := rl.mountedLayer.Mount(mountLabel) + if err == nil { + rl.activityCount++ + rl.path = m + } + return m, err +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. 
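The comment in Parent above guards against a classic Go pitfall, reproduced below before the Unmount implementation: an interface value holding a nil pointer is not itself nil, so returning the field directly would make `Parent() == nil` checks fail.

package main

import "fmt"

type Layer interface{ ID() string }

type roLayer struct{}

func (*roLayer) ID() string { return "" }

func main() {
	var p *roLayer // nil pointer
	var viaField Layer = p
	fmt.Println(viaField == nil) // false: a non-nil interface wrapping a nil pointer

	var explicit Layer // what Parent() returns instead
	fmt.Println(explicit == nil) // true
}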
+func (rl *referencedRWLayer) Unmount() error { + rl.activityL.Lock() + defer rl.activityL.Unlock() + + if rl.activityCount == 0 { + return ErrNotMounted + } + if rl.activityCount == -1 { + return ErrLayerNotRetained + } + + rl.activityCount-- + if rl.activityCount > 0 { + return nil + } + + return rl.mountedLayer.Unmount() +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 00000000..92b0ea0e --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,164 @@ +package layer + +import ( + "fmt" + "io" + + "github.com/docker/distribution/digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + + referenceCount int + references map[Layer]struct{} +} + +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + r, err := rl.layerStore.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return rc, nil +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + + return nil +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: verifier, + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify 
layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
+		}
+	}
+	return
+}
+func (vrc *verifiedReadCloser) Close() error {
+	return vrc.rc.Close()
+}
diff --git a/vendor/github.com/docker/docker/libcontainerd/client.go b/vendor/github.com/docker/docker/libcontainerd/client.go
new file mode 100644
index 00000000..7e8e47bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/libcontainerd/client.go
@@ -0,0 +1,46 @@
+package libcontainerd
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/docker/docker/pkg/locker"
+)
+
+// clientCommon contains the platform agnostic fields used in the client structure
+type clientCommon struct {
+	backend    Backend
+	containers map[string]*container
+	locker     *locker.Locker
+	mapMutex   sync.RWMutex // protects read/write operations from containers map
+}
+
+func (clnt *client) lock(containerID string) {
+	clnt.locker.Lock(containerID)
+}
+
+func (clnt *client) unlock(containerID string) {
+	clnt.locker.Unlock(containerID)
+}
+
+// must hold a lock for cont.containerID
+func (clnt *client) appendContainer(cont *container) {
+	clnt.mapMutex.Lock()
+	clnt.containers[cont.containerID] = cont
+	clnt.mapMutex.Unlock()
+}
+func (clnt *client) deleteContainer(friendlyName string) {
+	clnt.mapMutex.Lock()
+	delete(clnt.containers, friendlyName)
+	clnt.mapMutex.Unlock()
+}
+
+func (clnt *client) getContainer(containerID string) (*container, error) {
+	clnt.mapMutex.RLock()
+	container, ok := clnt.containers[containerID]
+	defer clnt.mapMutex.RUnlock()
+	if !ok {
+		return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error
+	}
+	return container, nil
+}
diff --git a/vendor/github.com/docker/docker/libcontainerd/client_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_linux.go
new file mode 100644
index 00000000..8eab7512
--- /dev/null
+++ b/vendor/github.com/docker/docker/libcontainerd/client_linux.go
@@ -0,0 +1,401 @@
+package libcontainerd
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	containerd "github.com/docker/containerd/api/grpc/types"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/opencontainers/specs/specs-go"
+	"golang.org/x/net/context"
+)
+
+type client struct {
+	clientCommon
+
+	// Platform specific properties below here.
+	remote        *remote
+	q             queue
+	exitNotifiers map[string]*exitNotifier
+}
+
+func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Process) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return err
+	}
+
+	spec, err := container.spec()
+	if err != nil {
+		return err
+	}
+	sp := spec.Process
+	sp.Args = specp.Args
+	sp.Terminal = specp.Terminal
+	if specp.Env != nil {
+		sp.Env = specp.Env
+	}
+	if specp.Cwd != nil {
+		sp.Cwd = *specp.Cwd
+	}
+	if specp.User != nil {
+		sp.User = specs.User{
+			UID:            specp.User.UID,
+			GID:            specp.User.GID,
+			AdditionalGids: specp.User.AdditionalGids,
+		}
+	}
+	if specp.Capabilities != nil {
+		sp.Capabilities = specp.Capabilities
+	}
+
+	p := container.newProcess(processFriendlyName)
+
+	r := &containerd.AddProcessRequest{
+		Args:     sp.Args,
+		Cwd:      sp.Cwd,
+		Terminal: sp.Terminal,
+		Id:       containerID,
+		Env:      sp.Env,
+		User: &containerd.User{
+			Uid:            sp.User.UID,
+			Gid:            sp.User.GID,
+			AdditionalGids: sp.User.AdditionalGids,
+		},
+		Pid:             processFriendlyName,
+		Stdin:           p.fifo(syscall.Stdin),
+		Stdout:          p.fifo(syscall.Stdout),
+		Stderr:          p.fifo(syscall.Stderr),
+		Capabilities:    sp.Capabilities,
+		ApparmorProfile: sp.ApparmorProfile,
+		SelinuxLabel:    sp.SelinuxLabel,
+		NoNewPrivileges: sp.NoNewPrivileges,
+		Rlimits:         convertRlimits(sp.Rlimits),
+	}
+
+	iopipe, err := p.openFifos(sp.Terminal)
+	if err != nil {
+		return err
+	}
+
+	if _, err := clnt.remote.apiClient.AddProcess(context.Background(), r); err != nil {
+		p.closeFifos(iopipe)
+		return err
+	}
+
+	container.processes[processFriendlyName] = p
+
+	clnt.unlock(containerID)
+
+	if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil {
+		return err
+	}
+	clnt.lock(containerID)
+
+	return nil
+}
+
+func (clnt *client) prepareBundleDir(uid, gid int) (string, error) {
+	root, err := filepath.Abs(clnt.remote.stateDir)
+	if err != nil {
+		return "", err
+	}
+	if uid == 0 && gid == 0 {
+		return root, nil
+	}
+	p := string(filepath.Separator)
+	for _, d := range strings.Split(root, string(filepath.Separator))[1:] {
+		p = filepath.Join(p, d)
+		fi, err := os.Stat(p)
+		if err != nil && !os.IsNotExist(err) {
+			return "", err
+		}
+		if os.IsNotExist(err) || fi.Mode()&1 == 0 {
+			p = fmt.Sprintf("%s.%d.%d", p, uid, gid)
+			if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) {
+				return "", err
+			}
+		}
+	}
+	return p, nil
+}
+
+func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+
+	if ctr, err := clnt.getContainer(containerID); err == nil {
+		if ctr.restarting {
+			ctr.restartManager.Cancel()
+			ctr.clean()
+		} else {
+			return fmt.Errorf("Container %s is already active", containerID)
+		}
+	}
+
+	uid, gid, err := getRootIDs(specs.Spec(spec))
+	if err != nil {
+		return err
+	}
+	dir, err := clnt.prepareBundleDir(uid, gid)
+	if err != nil {
+		return err
+	}
+
+	container := clnt.newContainer(filepath.Join(dir, containerID), options...)
+	if err := container.clean(); err != nil {
+		return err
+	}
+
+	defer func() {
+		if err != nil {
+			container.clean()
+			clnt.deleteContainer(containerID)
+		}
+	}()
+
+	if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	f, err := os.Create(filepath.Join(container.dir, configFilename))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := json.NewEncoder(f).Encode(spec); err != nil {
+		return err
+	}
+
+	return container.start()
+}
+
+func (clnt *client) Signal(containerID string, sig int) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	_, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{
+		Id:     containerID,
+		Pid:    InitFriendlyName,
+		Signal: uint32(sig),
+	})
+	return err
+}
+
+func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	if _, err := clnt.getContainer(containerID); err != nil {
+		return err
+	}
+	_, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{
+		Id:     containerID,
+		Pid:    processFriendlyName,
+		Width:  uint32(width),
+		Height: uint32(height),
+	})
+	return err
+}
+
+func (clnt *client) Pause(containerID string) error {
+	return clnt.setState(containerID, StatePause)
+}
+
+func (clnt *client) setState(containerID, state string) error {
+	clnt.lock(containerID)
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		clnt.unlock(containerID)
+		return err
+	}
+	if container.systemPid == 0 {
+		clnt.unlock(containerID)
+		return fmt.Errorf("No active process for container %s", containerID)
+	}
+	st := "running"
+	if state == StatePause {
+		st = "paused"
+	}
+	chstate := make(chan struct{})
+	_, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{
+		Id:     containerID,
+		Pid:    InitFriendlyName,
+		Status: st,
+	})
+	if err != nil {
+		clnt.unlock(containerID)
+		return err
+	}
+	container.pauseMonitor.append(state, chstate)
+	clnt.unlock(containerID)
+	<-chstate
+	return nil
+}
+
+func (clnt *client) Resume(containerID string) error {
+	return clnt.setState(containerID, StateResume)
+}
+
+func (clnt *client) Stats(containerID string) (*Stats, error) {
+	resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID})
+	if err != nil {
+		return nil, err
+	}
+	return (*Stats)(resp), nil
+}
+
+// Take care of the old 1.11.0 behavior in case the version upgrade
+// happened without a clean daemon shutdown
+func (clnt *client) cleanupOldRootfs(containerID string) {
+	// Unmount and delete the bundle folder
+	if mts, err := mount.GetMounts(); err == nil {
+		for _, mts := range mts {
+			if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") {
+				if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil {
+					os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs"))
+				}
+				break
+			}
+		}
+	}
+}
+
+func (clnt *client) setExited(containerID string) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+
+	var exitCode uint32
+	if event, ok := clnt.remote.pastEvents[containerID]; ok {
+		exitCode = event.Status
+		delete(clnt.remote.pastEvents, containerID)
+	}
+
+	err := clnt.backend.StateChanged(containerID, StateInfo{
+		State:    StateExit,
+		ExitCode: exitCode,
+	})
+
+	clnt.cleanupOldRootfs(containerID)
+
+	return err
+}
+
+func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) newContainer(dir string, options ...CreateOption) *container { + container := &container{ + containerCommon: containerCommon{ + process: process{ + dir: dir, + processCommon: processCommon{ + containerID: filepath.Base(dir), + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + } + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Error(err) + } + } + return container +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +type exitNotifier struct { + id string + client *client + c chan struct{} + once sync.Once +} + +func (en *exitNotifier) close() { + en.once.Do(func() { + close(en.c) + en.client.mapMutex.Lock() + if en == en.client.exitNotifiers[en.id] { + delete(en.client.exitNotifiers, en.id) + } + en.client.mapMutex.Unlock() + }) +} +func (en *exitNotifier) wait() <-chan struct{} { + return en.c +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go new file mode 100644 index 00000000..1a1f7fe7 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go @@ -0,0 +1,83 @@ +// +build experimental + +package libcontainerd + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" +) + +func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == 
nil {
+		return fmt.Errorf("container %s is already active", containerID)
+	}
+
+	defer func() {
+		if err != nil {
+			clnt.deleteContainer(cont.Id)
+		}
+	}()
+
+	container := clnt.newContainer(cont.BundlePath, options...)
+	container.systemPid = systemPid(cont)
+
+	var terminal bool
+	for _, p := range cont.Processes {
+		if p.Pid == InitFriendlyName {
+			terminal = p.Terminal
+		}
+	}
+
+	iopipe, err := container.openFifos(terminal)
+	if err != nil {
+		return err
+	}
+
+	if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil {
+		return err
+	}
+
+	clnt.appendContainer(container)
+
+	err = clnt.backend.StateChanged(containerID, StateInfo{
+		State: StateRestore,
+		Pid:   container.systemPid,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if event, ok := clnt.remote.pastEvents[containerID]; ok {
+		// This should only be a pause or resume event
+		if event.Type == StatePause || event.Type == StateResume {
+			return clnt.backend.StateChanged(containerID, StateInfo{
+				State: event.Type,
+				Pid:   container.systemPid,
+			})
+		}
+
+		logrus.Warnf("unexpected backlog event: %#v", event)
+	}
+
+	return nil
+}
+
+func (clnt *client) Restore(containerID string, options ...CreateOption) error {
+	cont, err := clnt.getContainerdContainer(containerID)
+	if err == nil && cont.Status != "stopped" {
+		if err := clnt.restore(cont, options...); err != nil {
+			logrus.Errorf("error restoring %s: %v", containerID, err)
+		}
+		return nil
+	}
+	return clnt.setExited(containerID)
+}
diff --git a/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go
new file mode 100644
index 00000000..1b4a2bc5
--- /dev/null
+++ b/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go
@@ -0,0 +1,46 @@
+// +build !experimental
+
+package libcontainerd
+
+import (
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+func (clnt *client) Restore(containerID string, options ...CreateOption) error {
+	w := clnt.getOrCreateExitNotifier(containerID)
+	defer w.close()
+	cont, err := clnt.getContainerdContainer(containerID)
+	if err == nil && cont.Status != "stopped" {
+		clnt.lock(cont.Id)
+		container := clnt.newContainer(cont.BundlePath)
+		container.systemPid = systemPid(cont)
+		clnt.appendContainer(container)
+		clnt.unlock(cont.Id)
+
+		container.discardFifos()
+
+		if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil {
+			logrus.Errorf("error sending sigterm to %v: %v", containerID, err)
+		}
+		select {
+		case <-time.After(10 * time.Second):
+			if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil {
+				logrus.Errorf("error sending sigkill to %v: %v", containerID, err)
+			}
+			select {
+			case <-time.After(2 * time.Second):
+			case <-w.wait():
+				return nil
+			}
+		case <-w.wait():
+			return nil
+		}
+	}
+
+	clnt.deleteContainer(containerID)
+
+	return clnt.setExited(containerID)
+}
diff --git a/vendor/github.com/docker/docker/libcontainerd/container.go b/vendor/github.com/docker/docker/libcontainerd/container.go
new file mode 100644
index 00000000..30bc9502
--- /dev/null
+++ b/vendor/github.com/docker/docker/libcontainerd/container.go
@@ -0,0 +1,40 @@
+package libcontainerd
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/docker/restartmanager"
+)
+
+const (
+	// InitFriendlyName is the name given in the lookup map of processes
+	// for the first process started in a container.
+ InitFriendlyName = "init" + configFilename = "config.json" +) + +type containerCommon struct { + process + restartManager restartmanager.RestartManager + restarting bool + processes map[string]*process + startedAt time.Time +} + +// WithRestartManager sets the restartmanager to be used with the container. +func WithRestartManager(rm restartmanager.RestartManager) CreateOption { + return restartManager{rm} +} + +type restartManager struct { + rm restartmanager.RestartManager +} + +func (rm restartManager) Apply(p interface{}) error { + if pr, ok := p.(*container); ok { + pr.restartManager = rm.rm + return nil + } + return fmt.Errorf("WithRestartManager option not supported for this client") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_linux.go b/vendor/github.com/docker/docker/libcontainerd/container_linux.go new file mode 100644 index 00000000..8a49cde2 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container_linux.go @@ -0,0 +1,209 @@ +package libcontainerd + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/restartmanager" + "github.com/opencontainers/specs/specs-go" + "golang.org/x/net/context" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. + pauseMonitor + oom bool +} + +func (ctr *container) clean() error { + if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { + return nil + } + if _, err := os.Lstat(ctr.dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + if err := os.RemoveAll(ctr.dir); err != nil { + return err + } + return nil +} + +// cleanProcess removes the fifos used by an additional process. +// Caller needs to lock container ID before calling this method. 
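WithRestartManager above is an instance of the functional-options pattern that libcontainerd uses for both containers and remotes: each option is a value whose Apply type-asserts its target, so the same option list can be threaded through different client types. Reduced to its skeleton before cleanProcess below (all types hypothetical):

package main

import "fmt"

type container struct{ restarts int }

// CreateOption mirrors the shape used above: Apply takes an interface{}
// so options can be shared across different target types.
type CreateOption interface{ Apply(interface{}) error }

type withRestarts int

func (w withRestarts) Apply(p interface{}) error {
	if c, ok := p.(*container); ok {
		c.restarts = int(w)
		return nil
	}
	return fmt.Errorf("withRestarts option not supported for %T", p)
}

func newContainer(options ...CreateOption) (*container, error) {
	c := &container{}
	for _, o := range options {
		if err := o.Apply(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, _ := newContainer(withRestarts(3))
	fmt.Println(c.restarts) // 3
}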
+func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil { + logrus.Warnf("failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start() error { + spec, err := ctr.spec() + if err != nil { + return nil + } + iopipe, err := ctr.openFifos(spec.Process.Terminal) + if err != nil { + return err + } + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(syscall.Stdin), + Stdout: ctr.fifo(syscall.Stdout), + Stderr: ctr.fifo(syscall.Stderr), + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + } + ctr.client.appendContainer(ctr) + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.startedAt = time.Now() + + if err := ctr.client.backend.AttachStreams(ctr.containerID, *iopipe); err != nil { + return err + } + ctr.systemPid = systemPid(resp.Container) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }) +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + State: e.Type, + ExitCode: e.Status, + OOMKilled: e.Type == StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + if st.State == StateExit && ctr.restartManager != nil { + restart, wait, err := ctr.restartManager.ShouldRestart(e.Status, false, time.Since(ctr.startedAt)) + if err != nil { + logrus.Warnf("container %s %v", ctr.containerID, err) + } else if restart { + st.State = StateRestart + ctr.restarting = true + ctr.client.deleteContainer(e.Id) + go func() { + err := <-wait + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + ctr.restarting = false + if err != nil { + st.State = StateExit + ctr.clean() + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Error(err) + } + }) + if err != restartmanager.ErrRestartCanceled { + logrus.Error(err) + } + } else { + ctr.start() + } + }() + } + } + + // Remove process from list if we have exited + // We need to do so here in case the Message Handler decides to restart it. 
+ switch st.State { + case StateExit: + ctr.clean() + ctr.client.deleteContainer(e.Id) + case StateExitProcess: + ctr.cleanProcess(st.ProcessID) + } + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Error(err) + } + if e.Type == StatePause || e.Type == StateResume { + ctr.pauseMonitor.handle(e.Type) + } + if e.Type == StateExit { + if en := ctr.client.getExitNotifier(e.Id); en != nil { + en.close() + } + } + }) + + default: + logrus.Debugf("event unhandled: %+v", e) + } + return nil +} + +// discardFifos attempts to fully read the container fifos to unblock processes +// that may be blocked on the writer side. +func (ctr *container) discardFifos() { + for _, i := range []int{syscall.Stdout, syscall.Stderr} { + f := ctr.fifo(i) + c := make(chan struct{}) + go func() { + close(c) // this channel is used to not close the writer too early, before readonly open has been called. + io.Copy(ioutil.Discard, openReaderFromFifo(f)) + }() + <-c + closeReaderFifo(f) // avoid blocking permanently on open if there is no writer side + } +} diff --git a/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go new file mode 100644 index 00000000..379cbf1f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +// pauseMonitor is helper to get notifications from pause state changes. +type pauseMonitor struct { + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process.go b/vendor/github.com/docker/docker/libcontainerd/process.go new file mode 100644 index 00000000..57562c87 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon are the platform common fields as part of the process structure +// which keeps the state for the main container process, as well as any exec +// processes. +type processCommon struct { + client *client + + // containerID is the Container ID + containerID string + + // friendlyName is an identifier for the process (or `InitFriendlyName` + // for the first process) + friendlyName string + + // systemPid is the PID of the main container process + systemPid uint32 +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_linux.go b/vendor/github.com/docker/docker/libcontainerd/process_linux.go new file mode 100644 index 00000000..3c48576f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process_linux.go @@ -0,0 +1,110 @@ +package libcontainerd + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +var fdNames = map[int]string{ + syscall.Stdin: "stdin", + syscall.Stdout: "stdout", + syscall.Stderr: "stderr", +} + +// process keeps the state for both main container process and exec process. 
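The fifo plumbing that begins with the process type below has one non-obvious move, relied on by discardFifos above and closeReaderFifo further down: opening a fifo read-only blocks until a writer appears, so a stuck reader is unblocked by briefly opening the write side with O_NONBLOCK (which only succeeds once a reader is waiting). A Linux-only sketch under that assumption, with a hypothetical path:

package main

import (
	"os"
	"syscall"
	"time"
)

func main() {
	fn := "/tmp/example.fifo" // hypothetical path
	syscall.Mkfifo(fn, 0700)  // error ignored for brevity

	done := make(chan struct{})
	go func() {
		// Blocks until some writer opens the fifo.
		if f, err := os.OpenFile(fn, syscall.O_RDONLY, 0); err == nil {
			f.Close()
		}
		close(done)
	}()

	// A non-blocking write-side open fails with ENXIO until the reader
	// is actually waiting, hence the retry loop.
	for {
		if w, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0); err == nil {
			w.Close()
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	<-done
	os.Remove(fn)
}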
+type process struct { + processCommon + + // Platform specific fields are below here. + dir string +} + +func (p *process) openFifos(terminal bool) (*IOPipe, error) { + bundleDir := p.dir + if err := os.MkdirAll(bundleDir, 0700); err != nil { + return nil, err + } + + for i := 0; i < 3; i++ { + f := p.fifo(i) + if err := syscall.Mkfifo(f, 0700); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("mkfifo: %s %v", f, err) + } + } + + io := &IOPipe{} + stdinf, err := os.OpenFile(p.fifo(syscall.Stdin), syscall.O_RDWR, 0) + if err != nil { + return nil, err + } + + io.Stdout = openReaderFromFifo(p.fifo(syscall.Stdout)) + if !terminal { + io.Stderr = openReaderFromFifo(p.fifo(syscall.Stderr)) + } else { + io.Stderr = emptyReader{} + } + + io.Stdin = ioutils.NewWriteCloserWrapper(stdinf, func() error { + stdinf.Close() + _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: p.containerID, + Pid: p.friendlyName, + CloseStdin: true, + }) + return err + }) + + return io, nil +} + +func (p *process) closeFifos(io *IOPipe) { + io.Stdin.Close() + closeReaderFifo(p.fifo(syscall.Stdout)) + closeReaderFifo(p.fifo(syscall.Stderr)) +} + +type emptyReader struct{} + +func (r emptyReader) Read(b []byte) (int, error) { + return 0, io.EOF +} + +func openReaderFromFifo(fn string) io.Reader { + r, w := io.Pipe() + c := make(chan struct{}) + go func() { + close(c) + stdoutf, err := os.OpenFile(fn, syscall.O_RDONLY, 0) + if err != nil { + r.CloseWithError(err) + } + if _, err := io.Copy(w, stdoutf); err != nil { + r.CloseWithError(err) + } + w.Close() + stdoutf.Close() + }() + <-c // wait for the goroutine to get scheduled and syscall to block + return r +} + +// closeReaderFifo closes fifo that may be blocked on open by opening the write side. +func closeReaderFifo(fn string) { + f, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return + } + f.Close() +} + +func (p *process) fifo(index int) string { + return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/queue_linux.go b/vendor/github.com/docker/docker/libcontainerd/queue_linux.go new file mode 100644 index 00000000..34bc81d2 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/queue_linux.go @@ -0,0 +1,29 @@ +package libcontainerd + +import "sync" + +type queue struct { + sync.Mutex + fns map[string]chan struct{} +} + +func (q *queue) append(id string, f func()) { + q.Lock() + defer q.Unlock() + + if q.fns == nil { + q.fns = make(map[string]chan struct{}) + } + + done := make(chan struct{}) + + fn, ok := q.fns[id] + q.fns[id] = done + go func() { + if ok { + <-fn + } + f() + close(done) + }() +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote.go b/vendor/github.com/docker/docker/libcontainerd/remote.go new file mode 100644 index 00000000..a679edcf --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote.go @@ -0,0 +1,18 @@ +package libcontainerd + +// Remote on Linux defines the accesspoint to the containerd grpc API. +// Remote on Windows is largely an unimplemented interface as there is +// no remote containerd. +type Remote interface { + // Client returns a new Client instance connected with given Backend. + Client(Backend) (Client, error) + // Cleanup stops containerd if it was started by libcontainerd. + // Note this is not used on Windows as there is no remote containerd. 
+ Cleanup() +} + +// RemoteOption allows to configure paramters of remotes. +// This is unused on Windows. +type RemoteOption interface { + Apply(Remote) error +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_linux.go b/vendor/github.com/docker/docker/libcontainerd/remote_linux.go new file mode 100644 index 00000000..fa339ce4 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_linux.go @@ -0,0 +1,290 @@ +package libcontainerd + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/locker" + sysinfo "github.com/docker/docker/pkg/system" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/transport" +) + +const ( + maxConnectionRetryCount = 3 + connectionRetryDelay = 3 * time.Second + containerdShutdownTimeout = 15 * time.Second + containerdBinary = "docker-containerd" + containerdPidFilename = "docker-containerd.pid" + containerdSockFilename = "docker-containerd.sock" + eventTimestampFilename = "event.ts" +) + +type remote struct { + sync.RWMutex + apiClient containerd.APIClient + daemonPid int + stateDir string + rpcAddr string + startDaemon bool + closeManually bool + debugLog bool + rpcConn *grpc.ClientConn + clients []*client + eventTsPath string + pastEvents map[string]*containerd.Event + runtimeArgs []string +} + +// New creates a fresh instance of libcontainerd remote. +func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specificed the correct address. Got error: %v", err) + } + }() + r := &remote{ + stateDir: stateDir, + daemonPid: -1, + eventTsPath: filepath.Join(stateDir, eventTimestampFilename), + pastEvents: make(map[string]*containerd.Event), + } + for _, option := range options { + if err := option.Apply(r); err != nil { + return nil, err + } + } + + if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { + return nil, err + } + + if r.rpcAddr == "" { + r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) + } + + if r.startDaemon { + if err := r.runContainerdDaemon(); err != nil { + return nil, err + } + } + + if err := r.startEventsMonitor(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *remote) Cleanup() { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + remote: r, + exitNotifiers: make(map[string]*exitNotifier), + } + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) updateEventTimestamp(t time.Time) { + f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) + defer f.Close() + if err != nil { + logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) + return + } + + b, err := t.MarshalText() + if err != nil { + logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) + return + } + + n, err := f.Write(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) + f.Truncate(0) + return + } + +} + +func (r *remote) getLastEventTimestamp() int64 { + t := time.Now() + + fi, err := os.Stat(r.eventTsPath) + if os.IsNotExist(err) || fi.Size() == 0 { + 
return t.Unix() + } + + f, err := os.Open(r.eventTsPath) + if err != nil { + logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) + return t.Unix() + } + defer f.Close() + + b := make([]byte, fi.Size()) + n, err := f.Read(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err) + return t.Unix() + } + + if err := t.UnmarshalText(b); err != nil { + logrus.Warnf("libcontainerd: Unable to parse last event ts: %v", err) + return time.Now().Unix() + } + + return t.Unix() +} + +func (r *remote) startEventsMonitor() error { + // First, get past events + er := &containerd.EventsRequest{ + Timestamp: uint64(r.getLastEventTimestamp()), + } + events, err := r.apiClient.Events(context.Background(), er) + if err != nil { + return err + } + go r.handleEventStream(events) + return nil +} + +func (r *remote) handleEventStream(events containerd.API_EventsClient) { + live := false + for { + e, err := events.Recv() + if err != nil { + if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && + r.closeManually { + // ignore error if grpc remote connection is closed manually + return + } + logrus.Errorf("failed to receive event from containerd: %v", err) + go r.startEventsMonitor() + return + } + + if !live { + logrus.Debugf("received past containerd event: %#v", e) + + // Pause/Resume events should never happen after an exit event + switch e.Type { + case StateExit: + r.pastEvents[e.Id] = e + case StatePause: + r.pastEvents[e.Id] = e + case StateResume: + r.pastEvents[e.Id] = e + case stateLive: + live = true + r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0)) + } + } else { + logrus.Debugf("received containerd event: %#v", e) + + var container *container + var c *client + r.RLock() + for _, c = range r.clients { + container, err = c.getContainer(e.Id) + if err == nil { + break + } + } + r.RUnlock() + if container == nil { + logrus.Errorf("no state for container %s: %v", e.Id, err) + continue + } + + if err := container.handleEvent(e); err != nil { + logrus.Errorf("error processing state change for %s: %v", e.Id, err) + } + + r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0)) + } + } +} + +func (r *remote) runContainerdDaemon() error { + var err error + r.apiClient, err = newBridge(stateDir, 10, "docker-runc", r.runtimeArgs) + return err +} + +// WithRemoteAddr sets the external containerd socket to connect to. +func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.rpcAddr = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRuntimeArgs sets the list of runtime args passed to containerd. +func WithRuntimeArgs(args []string) RemoteOption { + return runtimeArgs(args) +} + +type runtimeArgs []string + +func (rt runtimeArgs) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtimeArgs = rt + return nil + } + return fmt.Errorf("WithRuntimeArgs option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run the containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithDebugLog defines whether containerd debug logs will be enabled for the daemon.
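+// All RemoteOption values follow the same pattern: a small concrete type whose +// Apply method type-asserts down to *remote and sets a single field. A hedged +// usage sketch (the state directory path here is illustrative only): +// +// r, err := libcontainerd.New("/var/run/docker/libcontainerd", +// libcontainerd.WithStartDaemon(true), +// libcontainerd.WithDebugLog(true)) +// if err != nil { +// logrus.Fatal(err) +// } +//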
+func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go new file mode 100644 index 00000000..d745298a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go @@ -0,0 +1,48 @@ +package libcontainerd + +import ( + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/api/grpc/server" + "github.com/docker/containerd/api/grpc/types" + "github.com/docker/containerd/subreaper" + "github.com/docker/containerd/supervisor" +) + +var ( + stateDir = "/run/containerd" +) + +type bridge struct { + s types.APIServer +} + +func newBridge(stateDir string, concurrency int, runtimeName string, runtimeArgs []string) (types.APIClient, error) { + s, err := daemon(stateDir, concurrency, runtimeName, runtimeArgs) + if err != nil { + return nil, err + } + return &bridge{s: s}, nil +} + +func daemon(stateDir string, concurrency int, runtimeName string, runtimeArgs []string) (types.APIServer, error) { + if err := subreaper.Start(); err != nil { + logrus.WithField("error", err).Error("containerd: start subreaper") + } + sv, err := supervisor.New(stateDir, runtimeName, "", runtimeArgs, 15*time.Second, 500) + if err != nil { + return nil, err + } + wg := &sync.WaitGroup{} + for i := 0; i < concurrency; i++ { + wg.Add(1) + w := supervisor.NewWorker(sv, wg) + go w.Start() + } + if err := sv.Start(); err != nil { + return nil, err + } + return server.NewServer(sv), nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go new file mode 100644 index 00000000..0000bbd6 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go @@ -0,0 +1,131 @@ +package libcontainerd + +import ( + "io" + + "github.com/docker/containerd/api/grpc/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func (b *bridge) CreateContainer(ctx context.Context, in *types.CreateContainerRequest, opts ...grpc.CallOption) (*types.CreateContainerResponse, error) { + return b.s.CreateContainer(ctx, in) +} + +func (b *bridge) UpdateContainer(ctx context.Context, in *types.UpdateContainerRequest, opts ...grpc.CallOption) (*types.UpdateContainerResponse, error) { + return b.s.UpdateContainer(ctx, in) +} + +func (b *bridge) Signal(ctx context.Context, in *types.SignalRequest, opts ...grpc.CallOption) (*types.SignalResponse, error) { + return b.s.Signal(ctx, in) +} + +func (b *bridge) UpdateProcess(ctx context.Context, in *types.UpdateProcessRequest, opts ...grpc.CallOption) (*types.UpdateProcessResponse, error) { + return b.s.UpdateProcess(ctx, in) +} + +func (b *bridge) AddProcess(ctx context.Context, in *types.AddProcessRequest, opts ...grpc.CallOption) (*types.AddProcessResponse, error) { + return b.s.AddProcess(ctx, in) +} + +func (b *bridge) CreateCheckpoint(ctx context.Context, in *types.CreateCheckpointRequest, opts ...grpc.CallOption) (*types.CreateCheckpointResponse, error) { + return b.s.CreateCheckpoint(ctx, in) +} + +func (b *bridge) DeleteCheckpoint(ctx context.Context, in *types.DeleteCheckpointRequest, opts ...grpc.CallOption) 
(*types.DeleteCheckpointResponse, error) { + return b.s.DeleteCheckpoint(ctx, in) +} + +func (b *bridge) ListCheckpoint(ctx context.Context, in *types.ListCheckpointRequest, opts ...grpc.CallOption) (*types.ListCheckpointResponse, error) { + return b.s.ListCheckpoint(ctx, in) +} + +func (b *bridge) State(ctx context.Context, in *types.StateRequest, opts ...grpc.CallOption) (*types.StateResponse, error) { + return b.s.State(ctx, in) +} + +func (b *bridge) GetServerVersion(ctx context.Context, in *types.GetServerVersionRequest, opts ...grpc.CallOption) (*types.GetServerVersionResponse, error) { + return b.s.GetServerVersion(ctx, in) +} + +func (b *bridge) Events(ctx context.Context, in *types.EventsRequest, opts ...grpc.CallOption) (types.API_EventsClient, error) { + c := make(chan *types.Event, 1024) + client := &aPI_EventsClient{ + c: c, + } + client.ctx = ctx + server := &aPI_EventsServer{ + c: c, + } + server.ctx = ctx + go b.s.Events(in, server) + return client, nil +} + +func (b *bridge) Stats(ctx context.Context, in *types.StatsRequest, opts ...grpc.CallOption) (*types.StatsResponse, error) { + return b.s.Stats(ctx, in) +} + +type aPI_EventsServer struct { + serverStream + c chan *types.Event +} + +func (a *aPI_EventsServer) Send(msg *types.Event) error { + a.c <- msg + return nil +} + +type serverStream struct { + stream +} + +func (s *serverStream) SendHeader(metadata.MD) error { + return nil +} + +func (s *serverStream) SetTrailer(metadata.MD) { +} + +type aPI_EventsClient struct { + clientStream + c chan *types.Event +} + +func (a *aPI_EventsClient) Recv() (*types.Event, error) { + e, ok := <-a.c + if !ok { + return nil, io.EOF + } + return e, nil +} + +type stream struct { + ctx context.Context +} + +func (s *stream) Context() context.Context { + return s.ctx +} +func (s *stream) SendMsg(m interface{}) error { + return nil +} +func (s *stream) RecvMsg(m interface{}) error { + return nil +} + +// clientStream defines the interface a client stream has to satisfy. +type clientStream struct { + stream +} + +func (c *clientStream) Header() (metadata.MD, error) { + return nil, nil +} +func (c *clientStream) Trailer() metadata.MD { + return nil +} +func (c *clientStream) CloseSend() error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types.go b/vendor/github.com/docker/docker/libcontainerd/types.go new file mode 100644 index 00000000..bb82fe40 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types.go @@ -0,0 +1,60 @@ +package libcontainerd + +import "io" + +// State constants used in state change reporting. +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestart = "restart" + StateRestore = "restore" + StateStartProcess = "start-process" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state + stateLive = "live" +) + +// StateInfo contains a description of the new state the container has entered. +type StateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string + OOMKilled bool // TODO Windows containerd factor out +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error + AttachStreams(processFriendlyName string, io IOPipe) error +} + +// Client provides access to containerd features.
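+// Commands flow in through Client, while state transitions and stdio flow back +// out through the Backend callbacks above. A minimal hedged sketch (daemonBackend +// is a hypothetical Backend implementation and spec comes from the caller): +// +// client, _ := remote.Client(daemonBackend) +// if err := client.Create("abc123", spec); err != nil { +// // handle error +// } +// // Backend.StateChanged("abc123", StateInfo{State: StateStart, ...}) fires +// // asynchronously once the container is running.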
+type Client interface { + Create(containerID string, spec Spec, options ...CreateOption) error + Signal(containerID string, sig int) error + AddProcess(containerID, processFriendlyName string, process Process) error + Resize(containerID, processFriendlyName string, width, height int) error + Pause(containerID string) error + Resume(containerID string) error + Restore(containerID string, options ...CreateOption) error + Stats(containerID string) (*Stats, error) + GetPidsForContainer(containerID string) ([]int, error) + Summary(containerID string) ([]Summary, error) + UpdateResources(containerID string, resources Resources) error +} + +// CreateOption allows configuring parameters of container creation. +type CreateOption interface { + Apply(interface{}) error +} + +// IOPipe contains the stdio streams. +type IOPipe struct { + Stdin io.WriteCloser + Stdout io.Reader + Stderr io.Reader + Terminal bool // Whether stderr is connected on Windows +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/vendor/github.com/docker/docker/libcontainerd/types_linux.go new file mode 100644 index 00000000..bee12d91 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_linux.go @@ -0,0 +1,47 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/specs/specs-go" +) + +// Spec is the base configuration for the container. It specifies platform +// independent configuration. This information must be included when the +// bundle is packaged for distribution. +type Spec specs.Spec + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []specs.Rlimit `json:"rlimits,omitempty"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile *string `json:"apparmorProfile,omitempty"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel *string `json:"selinuxLabel,omitempty"` +} + +// Stats contains the stats properties from containerd. +type Stats containerd.StatsResponse + +// Summary contains a container summary from containerd. +type Summary struct{} + +// User specifies linux specific user and group information for the container's +// main process. +type User specs.User + +// Resources defines updatable container resource values.
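+// Like Spec, Stats and User above, this is a thin re-declaration of an upstream +// type so daemon code can build resource values without importing the containerd +// grpc package directly. A hedged sketch (the MemoryLimit field name is assumed +// from the containerd API): +// +// r := libcontainerd.Resources{MemoryLimit: 256 * 1024 * 1024} +// err := client.UpdateResources(containerID, r)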
+type Resources containerd.UpdateResource diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go new file mode 100644 index 00000000..5b67244f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go @@ -0,0 +1,52 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/specs/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func hostIDFromMap(id uint32, mp []specs.IDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { + for _, r := range sr { + cr = append(cr, &containerd.Rlimit{ + Type: r.Type, + Hard: r.Hard, + Soft: r.Soft, + }) + } + return +} diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go new file mode 100644 index 00000000..9ee75461 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_linux.go @@ -0,0 +1,210 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/specs/specs-go" +) + +func sPtr(s string) *string { return &s } +func rPtr(r rune) *rune { return &r } +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +// DefaultSpec returns default oci spec used by docker. 
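+// Callers typically start from this baseline and layer container-specific +// settings on top. A hedged sketch (rootfs path and args are illustrative): +// +// s := oci.DefaultSpec() +// s.Root.Path = "/var/lib/docker/rootfs/c1" +// s.Process.Args = []string{"/bin/sh"} +// s.Hostname = "c1"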
+func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + + s.Process.Capabilities = []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } + + s.Linux = specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/sched_debug", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.Namespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + Devices: []specs.Device{ + { + Type: "c", + Path: "/dev/zero", + Major: 1, + Minor: 5, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/null", + Major: 1, + Minor: 3, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/urandom", + Major: 1, + Minor: 9, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/random", + Major: 1, + Minor: 8, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/fuse", + Major: 10, + Minor: 229, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + }, + Resources: &specs.Resources{ + Devices: []specs.DeviceCgroup{ + { + Allow: false, + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(5), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(3), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(9), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(8), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(0), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(1), + Access: sPtr("rwm"), + }, + { + Allow: false, + Type: sPtr("c"), + Major: iPtr(10), + Minor: iPtr(229), + Access: sPtr("rwm"), + }, + }, + }, + } + + return s +} diff --git a/vendor/github.com/docker/docker/opts/envfile_test.go b/vendor/github.com/docker/docker/opts/envfile_test.go deleted file mode 100644 index a172267b..00000000 --- a/vendor/github.com/docker/docker/opts/envfile_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "io/ioutil" - 
"os" - "reflect" - "strings" - "testing" -) - -func tmpFileWithContent(content string, t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "envfile-test") - if err != nil { - t.Fatal(err) - } - defer tmpFile.Close() - - tmpFile.WriteString(content) - return tmpFile.Name() -} - -// Test ParseEnvFile for a file with a few well formatted lines -func TestParseEnvFileGoodFile(t *testing.T) { - content := `foo=bar - baz=quux -# comment - -_foobar=foobaz -with.dots=working -and_underscore=working too -` - // Adding a newline + a line with pure whitespace. - // This is being done like this instead of the block above - // because it's common for editors to trim trailing whitespace - // from lines, which becomes annoying since that's the - // exact thing we need to test. - content += "\n \t " - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - expectedLines := []string{ - "foo=bar", - "baz=quux", - "_foobar=foobaz", - "with.dots=working", - "and_underscore=working too", - } - - if !reflect.DeepEqual(lines, expectedLines) { - t.Fatal("lines not equal to expected_lines") - } -} - -// Test ParseEnvFile for an empty file -func TestParseEnvFileEmptyFile(t *testing.T) { - tmpFile := tmpFileWithContent("", t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if len(lines) != 0 { - t.Fatal("lines not empty; expected empty") - } -} - -// Test ParseEnvFile for a non existent file -func TestParseEnvFileNonExistentFile(t *testing.T) { - _, err := ParseEnvFile("foo_bar_baz") - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } - if _, ok := err.(*os.PathError); !ok { - t.Fatalf("Expected a PathError, got [%v]", err) - } -} - -// Test ParseEnvFile for a badly formatted file -func TestParseEnvFileBadlyFormattedFile(t *testing.T) { - content := `foo=bar - f =quux -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected a ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} - -// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize -func TestParseEnvFileLineTooLongFile(t *testing.T) { - content := strings.Repeat("a", bufio.MaxScanTokenSize+42) - content = fmt.Sprint("foo=", content) - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } -} - -// ParseEnvFile with a random file, pass through -func TestParseEnvFileRandomFile(t *testing.T) { - content := `first line -another invalid line` - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - - if err == nil { - t.Fatalf("Expected a ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} diff --git 
a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go new file mode 100644 index 00000000..ad167592 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,148 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDockerDaemonHost + if host != "" { + _, err := parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost validates and sets defaults for a daemon host string. +func ParseHost(defaultToTLS bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go. +func parseDockerDaemonHost(addr string) (string, error) { + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return parseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string.
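+// A few example normalizations, derived from this function and parseTCPAddr: +// +// parseDockerDaemonHost("tcp://") -> DefaultTCPHost +// parseDockerDaemonHost("unix://") -> "unix:///var/run/docker.sock" +// parseDockerDaemonHost("10.0.0.1:2375") -> "tcp://10.0.0.1:2375" +// parseDockerDaemonHost("npipe:////./pipe/foo") -> "npipe:////./pipe/foo"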
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// parseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index ec52e9a7..00000000 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = DefaultTCPHost diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go index d787b56c..c7b0dc99 100644 --- a/vendor/github.com/docker/docker/opts/ip.go +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { } // Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parsable as an IP address it returns an error. +// string is not parseable as an IP address it returns an error. 
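+// For example (a hedged sketch; the error text is the message produced below): +// +// var ip net.IP +// o := NewIPOpt(&ip, "127.0.0.1") +// _ = o.Set("192.168.1.10") // ip now holds 192.168.1.10 +// err := o.Set("nope") // err: "nope is not an ip address"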
func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { diff --git a/vendor/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/docker/docker/opts/ip_test.go deleted file mode 100644 index 1027d84a..00000000 --- a/vendor/github.com/docker/docker/opts/ip_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package opts - -import ( - "net" - "testing" -) - -func TestIpOptString(t *testing.T) { - addresses := []string{"", "0.0.0.0"} - var ip net.IP - - for _, address := range addresses { - stringAddress := NewIPOpt(&ip, address).String() - if stringAddress != address { - t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) - } - } -} - -func TestNewIpOptInvalidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "Not an ip" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "127.0.0.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestNewIpOptValidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "192.168.1.1" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "192.168.1.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestIpOptSetInvalidVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - ipOpt := &IPOpt{IP: &ip} - - invalidIP := "invalid ip" - expectedError := "invalid ip is not an ip address" - err := ipOpt.Set(invalidIP) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) - } -} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go index a61b82cd..05aadbe7 100644 --- a/vendor/github.com/docker/docker/opts/opts.go +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -3,33 +3,13 @@ package opts import ( "fmt" "net" - "os" - "path" "regexp" "strings" - - "github.com/docker/docker/pkg/parsers" ) var ( alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// - // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter - // is not supplied. A better longer term solution would be to use a named - // pipe as the default on the Windows daemon. - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) ) // ListOpts holds a list of values and a validation function. 
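+// A hedged sketch of using ListOpts directly (it is normally bound to a CLI flag): +// +// o := NewListOpts(ValidateIPAddress) +// _ = o.Set("1.2.3.4") // accepted, appended to the list +// err := o.Set("not-an-ip") // rejected by the validator +// all := o.GetAll() // []string{"1.2.3.4"}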
@@ -95,6 +75,16 @@ func (opts *ListOpts) GetAll() []string { return (*opts.values) } +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + // Get checks the existence of the specified key. func (opts *ListOpts) Get(key string) bool { for _, k := range *opts.values { @@ -110,6 +100,35 @@ func (opts *ListOpts) Len() int { return len((*opts.values)) } +// NamedOption is an interface that list and map options +// with names implement. +type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + //MapOpts holds a map of values and a validation function. type MapOpts struct { values map[string]string @@ -155,116 +174,35 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { } } +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + // ValidatorFctType defines a validator function that returns a validated string and/or an error. type ValidatorFctType func(val string) (string, error) // ValidatorFctListType defines a validator function that returns a validated list of string and/or an error type ValidatorFctListType func(val string) ([]string, error) -// ValidateAttach validates that the specified string is a valid attach option. -func ValidateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := parsers.ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). 
-func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. -func ValidateDevice(val string) (string, error) { - return validatePath(val, ValidDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, fmt.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, fmt.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it returns the current value using os.Getenv. -// -// As on ParseEnvFile and related to #16585, environment variable names -// are not validate what so ever, it's up to application inside docker -// to validate them or not. -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - // ValidateIPAddress validates an Ip address. func ValidateIPAddress(val string) (string, error) { var ip = net.ParseIP(strings.TrimSpace(val)) @@ -274,15 +212,6 @@ func ValidateIPAddress(val string) (string, error) { return "", fmt.Errorf("%s is not an ip address", val) } -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - // ValidateDNSSearch validates domain for resolvconf search configuration. // A zero length domain is represented by a dot (.). func ValidateDNSSearch(val string) (string, error) { @@ -303,20 +232,6 @@ func validateDomain(val string) (string, error) { return "", fmt.Errorf("%s is not a valid domain", val) } -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). 
-func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - // ValidateLabel validates that the specified string is a valid label, and returns it. // Labels are in the form on key=value. func ValidateLabel(val string) (string, error) { @@ -325,33 +240,3 @@ func ValidateLabel(val string) (string, error) { } return val, nil } - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - _, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) - if err != nil { - return val, err - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultHost, val string) (string, error) { - host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) - if err != nil { - return val, err - } - return host, nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/docker/docker/opts/opts_test.go deleted file mode 100644 index e02d3f8e..00000000 --- a/vendor/github.com/docker/docker/opts/opts_test.go +++ /dev/null @@ -1,427 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "runtime" - "strings" - "testing" -) - -func TestValidateIPAddress(t *testing.T) { - if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) - } - -} - -func TestMapOpts(t *testing.T) { - tmpMap := make(map[string]string) - o := NewMapOpts(tmpMap, logOptsValidator) - o.Set("max-size=1") - if o.String() != "map[max-size:1]" { - t.Errorf("%s != [map[max-size:1]", o.String()) - } - - o.Set("max-file=2") - if len(tmpMap) != 2 { - t.Errorf("map length %d != 2", len(tmpMap)) - } - - if tmpMap["max-file"] != "2" { - t.Errorf("max-file = %s != 2", tmpMap["max-file"]) - } - - if tmpMap["max-size"] != "1" { - t.Errorf("max-size = %s != 1", tmpMap["max-size"]) - } - if o.Set("dummy-val=3") == nil { - t.Errorf("validator is not being called") - } -} - -func TestValidateMACAddress(t *testing.T) { - if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) - } - - if _, err := 
ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") - } - - if _, err := ValidateMACAddress(`random invalid string`); err == nil { - t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") - } -} - -func TestListOptsWithoutValidator(t *testing.T) { - o := NewListOpts(nil) - o.Set("foo") - if o.String() != "[foo]" { - t.Errorf("%s != [foo]", o.String()) - } - o.Set("bar") - if o.Len() != 2 { - t.Errorf("%d != 2", o.Len()) - } - o.Set("bar") - if o.Len() != 3 { - t.Errorf("%d != 3", o.Len()) - } - if !o.Get("bar") { - t.Error("o.Get(\"bar\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("foo") - if o.String() != "[bar bar]" { - t.Errorf("%s != [bar bar]", o.String()) - } - listOpts := o.GetAll() - if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { - t.Errorf("Expected [[bar bar]], got [%v]", listOpts) - } - mapListOpts := o.GetMap() - if len(mapListOpts) != 1 { - t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) - } - -} - -func TestListOptsWithValidator(t *testing.T) { - // Re-using logOptsvalidator (used by MapOpts) - o := NewListOpts(logOptsValidator) - o.Set("foo") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("foo=bar") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("max-file=2") - if o.Len() != 1 { - t.Errorf("%d != 1", o.Len()) - } - if !o.Get("max-file=2") { - t.Error("o.Get(\"max-file=2\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("max-file=2") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } -} - -func TestValidateDNSSearch(t *testing.T) { - valid := []string{ - `.`, - `a`, - `a.`, - `1.foo`, - `17.foo`, - `foo.bar`, - `foo.bar.baz`, - `foo.bar.`, - `foo.bar.baz`, - `foo1.bar2`, - `foo1.bar2.baz`, - `1foo.2bar.`, - `1foo.2bar.baz`, - `foo-1.bar-2`, - `foo-1.bar-2.baz`, - `foo-1.bar-2.`, - `foo-1.bar-2.baz`, - `1-foo.2-bar`, - `1-foo.2-bar.baz`, - `1-foo.2-bar.`, - `1-foo.2-bar.baz`, - } - - invalid := []string{ - ``, - ` `, - ` `, - `17`, - `17.`, - `.17`, - `17-.`, - `17-.foo`, - `.foo`, - `foo-.bar`, - `-foo.bar`, - `foo.bar-`, - `foo.bar-.baz`, - `foo.-bar`, - `foo.-bar.baz`, - `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, - } - - for _, domain := range valid { - if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } - - for _, domain := range invalid { - if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } -} - -func TestValidateExtraHosts(t *testing.T) { - valid := []string{ - `myhost:192.168.0.1`, - `thathost:10.0.2.1`, - `anipv6host:2003:ab34:e::1`, - `ipv6local:::1`, - } - - invalid := map[string]string{ - `myhost:192.notanipaddress.1`: `invalid IP`, - `thathost-nosemicolon10.0.0.1`: `bad format`, - `anipv6host:::::1`: `invalid IP`, - `ipv6local:::0::`: `invalid IP`, - } - - for _, extrahost := range valid { - if _, err := ValidateExtraHost(extrahost); err != nil { - t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) - } - } - - for 
extraHost, expectedError := range invalid { - if _, err := ValidateExtraHost(extraHost); err == nil { - t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) - } - } - } -} - -func TestValidateAttach(t *testing.T) { - valid := []string{ - "stdin", - "stdout", - "stderr", - "STDIN", - "STDOUT", - "STDERR", - } - if _, err := ValidateAttach("invalid"); err == nil { - t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") - } - - for _, attach := range valid { - value, err := ValidateAttach(attach) - if err != nil { - t.Fatal(err) - } - if value != strings.ToLower(attach) { - t.Fatalf("Expected [%v], got [%v]", attach, value) - } - } -} - -func TestValidateLink(t *testing.T) { - valid := []string{ - "name", - "dcdfbe62ecd0:alias", - "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", - "angry_torvalds:linus", - } - invalid := map[string]string{ - "": "empty string specified for links", - "too:much:of:it": "bad format for links: too:much:of:it", - } - - for _, link := range valid { - if _, err := ValidateLink(link); err != nil { - t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) - } - } - - for link, expectedError := range invalid { - if _, err := ValidateLink(link); err == nil { - t.Fatalf("ValidateLink(`%q`) should have failed validation", link) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) - } - } - } -} - -func TestValidateDevice(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:r", - "/hostPath:/containerPath:rw", - "/hostPath:/containerPath:mrw", - } - invalid := map[string]string{ - "": "bad format for path: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for path: :", - "/tmp:": " is not an absolute path", - ":test": "bad format for path: :test", - ":/test": "bad format for path: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for path: :test:", - "::": "bad format for path: ::", - ":::": "bad format for path: :::", - "/tmp:::": "bad format for path: /tmp:::", - ":/tmp::": "bad format for path: :/tmp::", - "path:ro": "ro is not an absolute path", - "path:rr": "rr is not an absolute path", - "a:/b:ro": "bad mode specified: ro", - "a:/b:rr": "bad mode specified: rr", - } - - for _, path := range valid { - if _, err := ValidateDevice(path); err != nil { - t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidateDevice(path); err == nil { - t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} - -func TestValidateEnv(t *testing.T) { - valids := map[string]string{ - "a": "a", - "something": "something", - "_=a": "_=a", - "env1=value1": "env1=value1", - "_env1=value1": "_env1=value1", - "env2=value2=value3": "env2=value2=value3", - "env3=abc!qwe": "env3=abc!qwe", - "env_4=value 4": "env_4=value 4", - 
"PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - "PATH=something": "PATH=something", - "asd!qwe": "asd!qwe", - "1asd": "1asd", - "123": "123", - "some space": "some space", - " some space before": " some space before", - "some space after ": "some space after ", - } - for value, expected := range valids { - actual, err := ValidateEnv(value) - if err != nil { - t.Fatal(err) - } - if actual != expected { - t.Fatalf("Expected [%v], got [%v]", expected, actual) - } - } -} - -func TestValidateLabel(t *testing.T) { - if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { - t.Fatalf("Expected an error [bad attribute format: label], go %v", err) - } - if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { - t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) - } - // Validate it's working with more than one = - if actual, err := ValidateLabel("key1=value1=value2"); err != nil { - t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) - } - // Validate it's working with one more - if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { - t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) - } -} - -func TestParseHost(t *testing.T) { - invalid := map[string]string{ - "anything": "Invalid bind address format: anything", - "something with spaces": "Invalid bind address format: something with spaces", - "://": "Invalid bind address format: ://", - "unknown://": "Invalid bind address format: unknown://", - "tcp://:port": "Invalid bind address format: :port", - "tcp://invalid": "Invalid bind address format: invalid", - "tcp://invalid:port": "Invalid bind address format: invalid:port", - } - const defaultHTTPHost = "tcp://127.0.0.1:2375" - var defaultHOST = "unix:///var/run/docker.sock" - - if runtime.GOOS == "windows" { - defaultHOST = defaultHTTPHost - } - valid := map[string]string{ - "": defaultHOST, - "fd://": "fd://", - "fd://something": "fd://something", - "tcp://host:": "tcp://host:2375", - "tcp://": "tcp://localhost:2375", - "tcp://:2375": "tcp://localhost:2375", // default ip address - "tcp://:2376": "tcp://localhost:2376", // default ip address - "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", - "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", - "tcp://192.168:8080": "tcp://192.168:8080", - "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P - "tcp://docker.com:2375": "tcp://docker.com:2375", - "unix://": "unix:///var/run/docker.sock", // default unix:// value - "unix://path/to/socket": "unix://path/to/socket", - } - - for value, errorMessage := range invalid { - if _, err := ParseHost(defaultHTTPHost, value); err == nil || err.Error() != errorMessage { - t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) - } - } - for value, expected := range valid { - if actual, err := ParseHost(defaultHTTPHost, value); err != nil || actual != expected { - t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) - } - } -} - -func logOptsValidator(val string) (string, error) { - allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} - vals := strings.Split(val, "=") - if allowedKeys[vals[0]] != "" { - return val, nil - } - return "", fmt.Errorf("invalid key %s", vals[0]) -} diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go deleted file mode 100644 index b9ff2bae..00000000 --- 
a/vendor/github.com/docker/docker/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts - -// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. -// @jhowardmsft, @swernli. -// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP4. -// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=toDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." -// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... -// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows TP4 build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/ulimit_test.go b/vendor/github.com/docker/docker/opts/ulimit_test.go deleted file mode 100644 index 3845d1ec..00000000 --- a/vendor/github.com/docker/docker/opts/ulimit_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "testing" - - "github.com/docker/docker/pkg/ulimit" -) - -func TestUlimitOpt(t *testing.T) { - ulimitMap := map[string]*ulimit.Ulimit{ - "nofile": {"nofile", 1024, 512}, - } - - ulimitOpt := NewUlimitOpt(&ulimitMap) - - expected := "[nofile=512:1024]" - if ulimitOpt.String() != expected { - t.Fatalf("Expected %v, got %v", expected, ulimitOpt) - } - - // Valid ulimit append to opts - if err := ulimitOpt.Set("core=1024:1024"); err != nil { - t.Fatal(err) - } - - // Invalid ulimit type returns an error and do not append to opts - if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { - t.Fatalf("Expected error on invalid ulimit type") - } - expected = "[nofile=512:1024 core=1024:1024]" - expected2 := "[core=1024:1024 nofile=512:1024]" - result := ulimitOpt.String() - if result != expected && result != expected2 { - t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) - } - - // And test GetList - ulimits := ulimitOpt.GetList() - if len(ulimits) != 2 { - t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) - } -} diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go new file mode 100644 index 00000000..6af771ff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go @@ -0,0 +1,92 @@ +// Package aaparser is a convenience package for interacting with `apparmor_parser`. +package aaparser + +import ( + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/containerd/subreaper/exec" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the version of apparmor_parser encoded as a single +// integer of the form MMmmPPP (see parseVersion). +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -r -W` on a specified apparmor profile to +// replace and write it to disk. +func LoadProfile(profilePath string) error { + _, err := cmd(filepath.Dir(profilePath), "-r", "-W", filepath.Base(profilePath)) + if err != nil { + return err + } + return nil +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) + c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd.
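+ // For the example output above ("2.9.1"), the arithmetic at the end of this + // function yields 2*100000 + 9*1000 + 1 = 209001.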
+ + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index e7c82e10..923e7ca1 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -11,7 +11,7 @@ import ( "io" "io/ioutil" "os" - "os/exec" + "github.com/docker/containerd/subreaper/exec" "path/filepath" "runtime" "strings" @@ -31,7 +31,7 @@ type ( Archive io.ReadCloser // Reader is a type of io.Reader. Reader io.Reader - // Compression is the state represtents if compressed or not. + // Compression is the state represents if compressed or not. Compression int // TarChownOptions wraps the chown options UID and GID. TarChownOptions struct { @@ -77,6 +77,11 @@ var ( defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} ) +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota @@ -88,7 +93,8 @@ const ( Xz ) -// IsArchive checks if it is a archive by the header. +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. func IsArchive(header []byte) bool { compression := DetectCompression(header) if compression != Uncompressed { @@ -99,6 +105,23 @@ func IsArchive(header []byte) bool { return err == nil } +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ @@ -128,7 +151,13 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) - if err != nil { + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 return nil, err } @@ -275,8 +304,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { } //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files - if ta.UIDMaps != nil || ta.GIDMaps != nil { + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { uid, gid, err := getFileUIDGID(fi.Sys()) if err != nil { return err @@ -407,19 +437,25 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return err } + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } @@ -466,13 +502,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close tar writer: %s", err) + logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { - logrus.Debugf("Can't close compress writer: %s", err) + logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { - logrus.Debugf("Can't close pipe writer: %s", err) + logrus.Errorf("Can't close pipe writer: %s", err) } }() @@ -515,7 +551,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { - logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } @@ -540,16 +576,42 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if include != relFilePath { skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) if err != nil { - logrus.Debugf("Error matching %s: %v", relFilePath, err) + logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } } if skip { - if !exceptions && f.IsDir() { + // If we want to skip this file and it's a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // It's not a dir, so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...)
in patterns so just skip dir + if !exceptions { return filepath.SkipDir } - return nil + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range patterns { + if pat[0] != '!' { + continue + } + pat = pat[1:] + string(filepath.Separator) + if strings.HasPrefix(pat, dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir } if seen[relFilePath] { @@ -571,7 +633,11 @@ } if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", filePath, err) + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } } return nil }) @@ -624,7 +690,7 @@ loop: parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0777) + err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) if err != nil { return err } @@ -794,10 +860,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error { GIDMaps: archiver.GIDMaps, } } - if err := archiver.Untar(archive, dst, options); err != nil { - return err - } - return nil + return archiver.Untar(archive, dst, options) } // UntarPath is a convenience function which looks for an archive @@ -818,9 +881,17 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := system.MkdirAll(dst, 0755); err != nil { + if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go deleted file mode 100644 index 6c54c02d..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_test.go +++ /dev/null @@ -1,1204 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - "testing" - "time" - - "github.com/docker/docker/pkg/system" -) - -func TestIsArchiveNilHeader(t *testing.T) { - out := IsArchive(nil) - if out { - t.Fatalf("isArchive should return false as nil is not a valid archive header") - } -} - -func TestIsArchiveInvalidHeader(t *testing.T) { - header := []byte{0x00, 0x01, 0x02} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is not a valid archive header", header) - } -} - -func TestIsArchiveBzip2(t *testing.T) { - header := []byte{0x42, 0x5A, 0x68} - out := IsArchive(header) - if !out { - t.Fatalf("isArchive should return true as %s is a bz2 header", header) - } -} - -func TestIsArchive7zip(t *testing.T) { - header := []byte{0x50, 0x4b, 0x03, 0x04} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) - } -} -
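The deleted IsArchive tests above exercise magic-byte detection: bzip2 streams start with 0x42 0x5A 0x68, while the 0x50 0x4B 0x03 0x04 header is deliberately rejected as unsupported. A minimal standalone sketch of the same idea (names here are illustrative, not the vendored API):

package main

import (
	"bytes"
	"fmt"
)

// detectCompression compares the first bytes of a stream against known
// magic numbers, mirroring the approach of archive.DetectCompression.
func detectCompression(header []byte) string {
	magics := map[string][]byte{
		"bzip2": {0x42, 0x5A, 0x68},
		"gzip":  {0x1F, 0x8B, 0x08},
		"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	}
	for name, magic := range magics {
		if len(header) >= len(magic) && bytes.Equal(header[:len(magic)], magic) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	fmt.Println(detectCompression([]byte{0x42, 0x5A, 0x68, 0x39})) // bzip2
	fmt.Println(detectCompression([]byte{0x50, 0x4B, 0x03, 0x04})) // uncompressed (not a supported magic)
}

Because the three magic numbers differ in their first byte, the map iteration order does not affect the result.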
-func TestDecompressStreamGzip(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.gz") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a gzip file.") - } -} - -func TestDecompressStreamBzip2(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.bz2") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a bzip2 file.") - } -} - -func TestDecompressStreamXz(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.xz") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a xz file.") - } -} - -func TestCompressStreamXzUnsuported(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, Xz) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestCompressStreamBzip2Unsupported(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, Xz) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestCompressStreamInvalid(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, -1) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestExtensionInvalid(t *testing.T) { - compression := Compression(-1) - output := compression.Extension() - if output != "" { - t.Fatalf("The extension of an invalid compression should be an empty string.") - } -} - -func TestExtensionUncompressed(t *testing.T) { - compression := Uncompressed - output := compression.Extension() - if output != "tar" { - t.Fatalf("The extension of a uncompressed archive should be 'tar'.") - } -} -func TestExtensionBzip2(t *testing.T) { - compression := Bzip2 - output := compression.Extension() - if output != "tar.bz2" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") - } -} -func TestExtensionGzip(t *testing.T) { - compression := Gzip - output := compression.Extension() - if output != "tar.gz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") - } -} -func TestExtensionXz(t *testing.T) { - compression := Xz - output := compression.Extension() - if output != "tar.xz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") - } -} - -func TestCmdStreamLargeStderr(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, _, err := cmdStream(cmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - errCh := make(chan error) - go func() { - _, err := io.Copy(ioutil.Discard, out) - errCh <- err - }() - select { - case err := 
<-errCh: - if err != nil { - t.Fatalf("Command should not have failed (err=%.100s...)", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("Command did not complete in 5 seconds; probable deadlock") - } -} - -func TestCmdStreamBad(t *testing.T) { - badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, _, err := cmdStream(badCmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - if output, err := ioutil.ReadAll(out); err == nil { - t.Fatalf("Command should have failed") - } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { - t.Fatalf("Wrong error value (%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestCmdStreamGood(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") - out, _, err := cmdStream(cmd, nil) - if err != nil { - t.Fatal(err) - } - if output, err := ioutil.ReadAll(out); err != nil { - t.Fatalf("Command should not have failed (err=%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestUntarPathWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - invalidDestFolder := path.Join(tempFolder, "invalidDest") - // Create a src file - srcFile := path.Join(tempFolder, "src") - _, err = os.Create(srcFile) - if err != nil { - t.Fatalf("Fail to create the source file") - } - err = UntarPath(srcFile, invalidDestFolder) - if err == nil { - t.Fatalf("UntarPath with invalid destination path should throw an error.") - } -} - -func TestUntarPathWithInvalidSrc(t *testing.T) { - dest, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - defer os.RemoveAll(dest) - err = UntarPath("/invalid/path", dest) - if err == nil { - t.Fatalf("UntarPath with invalid src path should throw an error.") - } -} - -func TestUntarPath(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(path.Join(tmpFolder, "src")) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFolder := path.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - err = UntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath shouldn't throw an error, %s.", err) - } - expectedFile := path.Join(destFolder, srcFile) - _, err = os.Stat(expectedFile) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -// Do the same test as above but with the destination as file, it should fail -func TestUntarPathWithDestinationFile(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(path.Join(tmpFolder, "src")) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err 
!= nil { - t.Fatal(err) - } - destFile := path.Join(tmpFolder, "dest") - _, err = os.Create(destFile) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - err = UntarPath(tarFile, destFile) - if err == nil { - t.Fatalf("UntarPath should throw an error if the destination if a file") - } -} - -// Do the same test as above but with the destination folder already exists -// and the destination file is a directory -// It's working, see https://github.com/docker/docker/issues/10040 -func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(srcFile) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFolder := path.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination folder") - } - // Let's create a folder that will has the same path as the extracted file (from tar) - destSrcFileAsFolder := path.Join(destFolder, srcFile) - err = os.MkdirAll(destSrcFileAsFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = UntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") - } -} - -func TestCopyWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - destFolder := path.Join(tempFolder, "dest") - invalidSrc := path.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyWithTar(invalidSrc, destFolder) - if err == nil { - t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - srcFolder := path.Join(tempFolder, "src") - inexistentDestFolder := path.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyWithTar(srcFolder, inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } -} - -// Test CopyWithTar with a file as src -func TestCopyWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - srcFolder := path.Join(folder, "src") - src := path.Join(folder, path.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content - if err != nil { - t.Fatalf("Destination file should be the same as the source.") - } -} - -// Test CopyWithTar with a folder as src -func TestCopyWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") 
- if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - src := path.Join(folder, path.Join("src", "folder")) - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content (the file inside) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestCopyFileWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - destFolder := path.Join(tempFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - invalidFile := path.Join(tempFolder, "doesnotexists") - err = CopyFileWithTar(invalidFile, destFolder) - if err == nil { - t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - defer os.RemoveAll(tempFolder) - srcFile := path.Join(tempFolder, "src") - inexistentDestFolder := path.Join(tempFolder, "doesnotexists") - _, err = os.Create(srcFile) - if err != nil { - t.Fatal(err) - } - err = CopyFileWithTar(srcFile, inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } - // FIXME Test the src file and content -} - -func TestCopyFileWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - src := path.Join(folder, "srcfolder") - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyFileWithTar(src, dest) - if err == nil { - t.Fatalf("CopyFileWithTar should throw an error with a folder.") - } -} - -func TestCopyFileWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - srcFolder := path.Join(folder, "src") - src := path.Join(folder, path.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest+"/") - if err != nil { - t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestTarFiles(t *testing.T) { - // try without hardlinks - if err := checkNoChanges(1000, false); err != nil { - t.Fatal(err) - } - // try with hardlinks - if err := checkNoChanges(1000, true); err != nil { - t.Fatal(err) - } -} - -func checkNoChanges(fileNum int, hardlinks bool) error { - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - return err - } - 
defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - return err - } - defer os.RemoveAll(destDir) - - _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) - if err != nil { - return err - } - - err = TarUntar(srcDir, destDir) - if err != nil { - return err - } - - changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - return err - } - if len(changes) > 0 { - return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) - } - return nil -} - -func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { - archive, err := TarWithOptions(origin, options) - if err != nil { - t.Fatal(err) - } - defer archive.Close() - - buf := make([]byte, 10) - if _, err := archive.Read(buf); err != nil { - return nil, err - } - wrap := io.MultiReader(bytes.NewReader(buf), archive) - - detectedCompression := DetectCompression(buf) - compression := options.Compression - if detectedCompression.Extension() != compression.Extension() { - return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) - } - - tmp, err := ioutil.TempDir("", "docker-test-untar") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmp) - if err := Untar(wrap, tmp, nil); err != nil { - return nil, err - } - if _, err := os.Stat(tmp); err != nil { - return nil, err - } - - return ChangesDirs(origin, tmp) -} - -func TestTarUntar(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - } -} - -func TestTarUntarWithXattr(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - capability, _ := system.Lgetxattr(path.Join(origin, "2"), 
"security.capability") - if capability == nil && capability[0] != 0x00 { - t.Fatalf("Untar should have kept the 'security.capability' xattr.") - } - } -} - -func TestTarWithOptions(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - if _, err := ioutil.TempDir(origin, "folder"); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - - cases := []struct { - opts *TarOptions - numChanges int - }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 2}, - {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, - {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, - } - for _, testCase := range cases { - changes, err := tarUntar(t, origin, testCase.opts) - if err != nil { - t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) - } - if len(changes) != testCase.numChanges { - t.Errorf("Expected %d changes, got %d for %+v:", - testCase.numChanges, len(changes), testCase.opts) - } - } -} - -// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz -// use PAX Global Extended Headers. -// Failing prevents the archives from being uncompressed during ADD -func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { - hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) - if err != nil { - t.Fatal(err) - } -} - -// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. -// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. -func TestUntarUstarGnuConflict(t *testing.T) { - f, err := os.Open("testdata/broken.tar") - if err != nil { - t.Fatal(err) - } - found := false - tr := tar.NewReader(f) - // Iterate through the files in the archive. 
- for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - t.Fatal(err) - } - if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { - found = true - break - } - } - if !found { - t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") - } -} - -func TestTarWithBlockCharFifo(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } - - changes, err := ChangesDirs(origin, dest) - if err != nil { - t.Fatal(err) - } - if len(changes) > 0 { - t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) - } -} - -func TestTarWithHardLink(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { - t.Fatal(err) - } - - var i1, i2 uint64 - if i1, err = getNlink(path.Join(origin, "1")); err != nil { - t.Fatal(err) - } - // sanity check that we can hardlink - if i1 != 2 { - t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } - - if i1, err = getInode(path.Join(dest, "1")); err != nil { - t.Fatal(err) - } - if i2, err = getInode(path.Join(dest, "2")); err != nil { - t.Fatal(err) - } - - if i1 != i2 { - t.Errorf("expected matching inodes, but got %d and %d", i1, i2) - } -} - -func getNlink(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 
0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - // We need this conversion on ARM64 - return uint64(statT.Nlink), nil -} - -func getInode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - return statT.Ino, nil -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} - -func BenchmarkTarUntar(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, false) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func BenchmarkTarUntarWithLinks(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, true) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func TestUntarInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarHardlinkToSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "symlink1", - Typeflag: tar.TypeSymlink, - Linkname: "regfile", - Mode: 0644, - }, - { - Name: "symlink2", - Typeflag: tar.TypeLink, - Linkname: "symlink1", - Mode: 0644, - }, - { - Name: "regfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestUntarInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try writing to victim/newdir/newfile with a symlink in the path - { - // this header needs to be before the next one, or else there is an error - Name: "dir/loophole", - Typeflag: tar.TypeSymlink, - Linkname: "../../victim", - Mode: 0755, - }, - { - Name: "dir/loophole/newdir/newfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestTempArchiveCloseMultipleTimes(t *testing.T) { - reader := ioutil.NopCloser(strings.NewReader("hello")) - tempArchive, err := NewTempArchive(reader, "") - buf := make([]byte, 10) - n, err := tempArchive.Read(buf) - if n != 5 { - t.Fatalf("Expected to read 5 bytes. Read %d instead", n) - } - for i := 0; i < 3; i++ { - if err = tempArchive.Close(); err != nil { - t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index abf9ad78..fbc3bb8c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string { } // getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a seperate function as this is platform specific. On Linux, we +// We use a separate function as this is platform specific. On Linux, we // can't use filepath.Join(srcPath,include) because this will clean away // a trailing "." or "/" which may be important. func getWalkRoot(srcPath string, include string) string { diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go deleted file mode 100644 index 18f45c48..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package archive - -import ( - "os" - "testing" -) - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct{ in, expected string }{ - {"foo", "foo"}, - {"foo/bar", "foo/bar"}, - {"foo/dir/", "foo/dir/"}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {"foo/bar", false, "foo/bar"}, - {"foo/bar", true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0000}, - {0777, 0777}, - {0644, 0644}, - {0755, 0755}, - {0444, 0444}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index b348cde6..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build windows - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. 
-func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a seperate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - // no notion of file ownership mapping yet on Windows - return 0, 0, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go deleted file mode 100644 index b7abc402..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build windows - -package archive - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestCopyFileWithInvalidDest(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := "c:dest" - srcFolder := filepath.Join(folder, "src") - src := filepath.Join(folder, "src", "src") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err == nil { - t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") - } -} - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct { - in, expected string - shouldFail bool - }{ - {"foo", "foo", false}, - {"foo/bar", "___", true}, // unix-styled windows path must fail - {`foo\bar`, "foo/bar", false}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if v.shouldFail && err == nil { - t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) - } else if !v.shouldFail && out != v.expected { - t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {`foo\bar`, false, "foo/bar"}, - {`foo\bar`, true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0111}, - {0777, 0755}, - {0644, 0755}, - {0755, 0755}, - {0444, 0555}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index e0bd4c46..81651c61 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -150,7 +150,7 @@ func Changes(layers []string, rw string) ([]Change, error) { // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directoriy in the rw and ro layers are all equal. + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. // Check https://github.com/docker/docker/pull/13590 for details. if f.IsDir() { changedDirs[path] = struct{}{} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go deleted file mode 100644 index 5a3282b5..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "sort" - "testing" -) - -func TestHardLinkOrder(t *testing.T) { - names := []string{"file1.txt", "file2.txt", "file3.txt"} - msg := []byte("Hey y'all") - - // Create dir - src, err := ioutil.TempDir("", "docker-hardlink-test-src-") - if err != nil { - t.Fatal(err) - } - //defer os.RemoveAll(src) - for _, name := range names { - func() { - fh, err := os.Create(path.Join(src, name)) - if err != nil { - t.Fatal(err) - } - defer fh.Close() - if _, err = fh.Write(msg); err != nil { - t.Fatal(err) - } - }() - } - // Create dest, with changes that includes hardlinks - dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") - if err != nil { - t.Fatal(err) - } - os.RemoveAll(dest) // we just want the name, at first - if err := copyDir(src, dest); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - for _, name := range names { - for i := 0; i < 5; i++ { - if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { - t.Fatal(err) - } - } - } - - // get changes - changes, err := ChangesDirs(dest, src) - if err != nil { - t.Fatal(err) - } - - // sort - sort.Sort(changesByPath(changes)) - - // ExportChanges - ar, err := ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrs, err := walkHeaders(ar) - if err != nil { - t.Fatal(err) - } - - // reverse sort - sort.Sort(sort.Reverse(changesByPath(changes))) - // ExportChanges - arRev, err := 
ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrsRev, err := walkHeaders(arRev) - if err != nil { - t.Fatal(err) - } - - // line up the two sets - sort.Sort(tarHeaders(hdrs)) - sort.Sort(tarHeaders(hdrsRev)) - - // compare Size and LinkName - for i := range hdrs { - if hdrs[i].Name != hdrsRev[i].Name { - t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) - } - if hdrs[i].Size != hdrsRev[i].Size { - t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) - } - if hdrs[i].Typeflag != hdrsRev[i].Typeflag { - t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) - } - if hdrs[i].Linkname != hdrsRev[i].Linkname { - t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) - } - } - -} - -type tarHeaders []tar.Header - -func (th tarHeaders) Len() int { return len(th) } -func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } -func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } - -func walkHeaders(r io.Reader) ([]tar.Header, error) { - t := tar.NewReader(r) - headers := []tar.Header{} - for { - hdr, err := t.Next() - if err != nil { - if err == io.EOF { - break - } - return headers, err - } - headers = append(headers, *hdr) - } - return headers, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_test.go deleted file mode 100644 index 52daaa64..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_test.go +++ /dev/null @@ -1,524 +0,0 @@ -package archive - -import ( - "io/ioutil" - "os" - "os/exec" - "path" - "sort" - "testing" - "time" -) - -func max(x, y int) int { - if x >= y { - return x - } - return y -} - -func copyDir(src, dst string) error { - cmd := exec.Command("cp", "-a", src, dst) - if err := cmd.Run(); err != nil { - return err - } - return nil -} - -type FileType uint32 - -const ( - Regular FileType = iota - Dir - Symlink -) - -type FileData struct { - filetype FileType - path string - contents string - permissions os.FileMode -} - -func createSampleDir(t *testing.T, root string) { - files := []FileData{ - {Regular, "file1", "file1\n", 0600}, - {Regular, "file2", "file2\n", 0666}, - {Regular, "file3", "file3\n", 0404}, - {Regular, "file4", "file4\n", 0600}, - {Regular, "file5", "file5\n", 0600}, - {Regular, "file6", "file6\n", 0600}, - {Regular, "file7", "file7\n", 0600}, - {Dir, "dir1", "", 0740}, - {Regular, "dir1/file1-1", "file1-1\n", 01444}, - {Regular, "dir1/file1-2", "file1-2\n", 0666}, - {Dir, "dir2", "", 0700}, - {Regular, "dir2/file2-1", "file2-1\n", 0666}, - {Regular, "dir2/file2-2", "file2-2\n", 0666}, - {Dir, "dir3", "", 0700}, - {Regular, "dir3/file3-1", "file3-1\n", 0666}, - {Regular, "dir3/file3-2", "file3-2\n", 0666}, - {Dir, "dir4", "", 0700}, - {Regular, "dir4/file3-1", "file4-1\n", 0666}, - {Regular, "dir4/file3-2", "file4-2\n", 0666}, - {Symlink, "symlink1", "target1", 0666}, - {Symlink, "symlink2", "target2", 0666}, - } - - now := time.Now() - for _, info := range files { - p := path.Join(root, info.path) - if info.filetype == Dir { - if err := os.MkdirAll(p, info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Regular { - if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Symlink { - if err := 
os.Symlink(info.contents, p); err != nil { - t.Fatal(err) - } - } - - if info.filetype != Symlink { - // Set a consistent ctime, atime for all files and dirs - if err := os.Chtimes(p, now, now); err != nil { - t.Fatal(err) - } - } - } -} - -func TestChangeString(t *testing.T) { - modifiyChange := Change{"change", ChangeModify} - toString := modifiyChange.String() - if toString != "C change" { - t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) - } - addChange := Change{"change", ChangeAdd} - toString = addChange.String() - if toString != "A change" { - t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) - } - deleteChange := Change{"change", ChangeDelete} - toString = deleteChange.String() - if toString != "D change" { - t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) - } -} - -func TestChangesWithNoChanges(t *testing.T) { - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - if len(changes) != 0 { - t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) - } -} - -func TestChangesWithChanges(t *testing.T) { - // Mock the readonly layer - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) - - // Mock the RW layer - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - - // Create a folder in RW layer - dir1 := path.Join(rwLayer, "dir1") - os.MkdirAll(dir1, 0740) - deletedFile := path.Join(dir1, ".wh.file1-2") - ioutil.WriteFile(deletedFile, []byte{}, 0600) - modifiedFile := path.Join(dir1, "file1-1") - ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) - // Let's add a subfolder for a newFile - subfolder := path.Join(dir1, "subfolder") - os.MkdirAll(subfolder, 0740) - newFile := path.Join(subfolder, "newFile") - ioutil.WriteFile(newFile, []byte{}, 0740) - - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1", ChangeModify}, - {"/dir1/file1-1", ChangeModify}, - {"/dir1/file1-2", ChangeDelete}, - {"/dir1/subfolder", ChangeModify}, - {"/dir1/subfolder/newFile", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) -} - -// See https://github.com/docker/docker/pull/13590 -func TestChangesWithChangesGH13590(t *testing.T) { - baseLayer, err := ioutil.TempDir("", "docker-changes-test.") - defer os.RemoveAll(baseLayer) - - dir3 := path.Join(baseLayer, "dir1/dir2/dir3") - os.MkdirAll(dir3, 07400) - - file := path.Join(dir3, "file.txt") - ioutil.WriteFile(file, []byte("hello"), 0666) - - layer, err := ioutil.TempDir("", "docker-changes-test2.") - defer os.RemoveAll(layer) - - // Test creating a new file - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) - file = path.Join(layer, "dir1/dir2/dir3/file1.txt") - ioutil.WriteFile(file, []byte("bye"), 
0666) - - changes, err := Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1/dir2/dir3", ChangeModify}, - {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) - - // Now test changing a file - layer, err = ioutil.TempDir("", "docker-changes-test3.") - defer os.RemoveAll(layer) - - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - file = path.Join(layer, "dir1/dir2/dir3/file.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err = Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges = []Change{ - {"/dir1/dir2/dir3/file.txt", ChangeModify}, - } - checkChanges(expectedChanges, changes, t) -} - -// Create an directory, copy it, make sure we report no changes between the two -func TestChangesDirsEmpty(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dst) - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - if len(changes) != 0 { - t.Fatalf("Reported changes for identical dirs: %v", changes) - } - os.RemoveAll(src) - os.RemoveAll(dst) -} - -func mutateSampleDir(t *testing.T, root string) { - // Remove a regular file - if err := os.RemoveAll(path.Join(root, "file1")); err != nil { - t.Fatal(err) - } - - // Remove a directory - if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { - t.Fatal(err) - } - - // Remove a symlink - if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { - t.Fatal(err) - } - - // Rewrite a file - if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { - t.Fatal(err) - } - - // Replace a file - if err := os.RemoveAll(path.Join(root, "file3")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { - t.Fatal(err) - } - - // Touch file - if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } - - // Replace file with dir - if err := os.RemoveAll(path.Join(root, "file5")); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { - t.Fatal(err) - } - - // Create new file - if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { - t.Fatal(err) - } - - // Create new dir - if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { - t.Fatal(err) - } - - // Create a new symlink - if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { - t.Fatal(err) - } - - // Change a symlink - if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - - // Replace dir with file - if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { - t.Fatal(err) - } - - // Touch dir - if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } -} - -func TestChangesDirsMutated(t *testing.T) { - src, err := 
ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - defer os.RemoveAll(dst) - - mutateSampleDir(t, dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - sort.Sort(changesByPath(changes)) - - expectedChanges := []Change{ - {"/dir1", ChangeDelete}, - {"/dir2", ChangeModify}, - {"/dirnew", ChangeAdd}, - {"/file1", ChangeDelete}, - {"/file2", ChangeModify}, - {"/file3", ChangeModify}, - {"/file4", ChangeModify}, - {"/file5", ChangeModify}, - {"/filenew", ChangeAdd}, - {"/symlink1", ChangeDelete}, - {"/symlink2", ChangeModify}, - {"/symlinknew", ChangeAdd}, - } - - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} - -func TestApplyLayer(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - defer os.RemoveAll(src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - mutateSampleDir(t, dst) - defer os.RemoveAll(dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - layer, err := ExportChanges(dst, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - - layerCopy, err := NewTempArchive(layer, "") - if err != nil { - t.Fatal(err) - } - - if _, err := ApplyLayer(src, layerCopy); err != nil { - t.Fatal(err) - } - - changes2, err := ChangesDirs(src, dst) - if err != nil { - t.Fatal(err) - } - - if len(changes2) != 0 { - t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) - } -} - -func TestChangesSizeWithHardlinks(t *testing.T) { - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(destDir) - - creationSize, err := prepareUntarSourceDirectory(100, destDir, true) - if err != nil { - t.Fatal(err) - } - - changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - t.Fatal(err) - } - - got := ChangesSize(destDir, changes) - if got != int64(creationSize) { - t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) - } -} - -func TestChangesSizeWithNoChanges(t *testing.T) { - size := ChangesSize("/tmp", nil) - if size != 0 { - t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) - } -} - -func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { - changes := []Change{ - {Path: "deletedPath", Kind: ChangeDelete}, - } - size := ChangesSize("/tmp", changes) - if size != 0 { - t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) - } -} - -func TestChangesSize(t *testing.T) { - parentPath, err := ioutil.TempDir("", "docker-changes-test") - 
defer os.RemoveAll(parentPath) - addition := path.Join(parentPath, "addition") - if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } - modification := path.Join(parentPath, "modification") - if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } - changes := []Change{ - {Path: "addition", Kind: ChangeAdd}, - {Path: "modification", Kind: ChangeModify}, - } - size := ChangesSize(parentPath, changes) - if size != 6 { - t.Fatalf("Expected 6 bytes of changes, got %d", size) - } -} - -func checkChanges(expectedChanges, changes []Change, t *testing.T) { - sort.Sort(changesByPath(expectedChanges)) - sort.Sort(changesByPath(changes)) - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index af94243f..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index ecfd0a9c..e1fa73f3 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -135,30 +135,17 @@ type CopyInfo struct { // operation. The given path should be an absolute local path. A source path // has all symlinks evaluated that appear before the last path separator ("/" // on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string) (CopyInfo, error) { - // Split the given path into its Directory and Base components. We will - // evaluate symlinks in the directory component then append the base. 
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symlink + // we will use the target file instead of the symlink if + // followLink is set path = normalizePath(path) - dirPath, basePath := filepath.Split(path) - resolvedDirPath, err := filepath.EvalSymlinks(dirPath) + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) if err != nil { return CopyInfo{}, err } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath - - var rebaseName string - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - stat, err := os.Lstat(resolvedPath) if err != nil { return CopyInfo{}, err } @@ -279,7 +266,10 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st // The destination exists as some type of file and the source content // is also a file. The source content entry will have to be renamed to // have a basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil case srcInfo.IsDir: // The destination does not exist and the source content is an archive // of a directory. The archive should be extracted to the parent of @@ -287,7 +277,10 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st // created as a result should take the name of the destination path. // The source content entries will have to be renamed to have a // basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil case assertsDirectory(dstInfo.Path): // The destination does not exist and is asserted to be created as a // directory, but the source content is not a directory. This is an @@ -301,14 +294,17 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st // to be created when the archive is extracted and the source content // entry will have to be renamed to have a basename which matches the // destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } } -// rebaseArchiveEntries rewrites the given srcContent archive replacing +// RebaseArchiveEntries rewrites the given srcContent archive replacing // an occurrence of oldBase with newBase at the beginning of entry names.
-func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { +func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { if oldBase == string(os.PathSeparator) { // If oldBase specifies the root directory, use an empty string as // oldBase instead so that newBase doesn't replace the path separator @@ -355,7 +351,7 @@ func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { // CopyResource performs an archive copy from the given source path to the // given destination path. The source path MUST exist and the destination // path's parent directory must exist. -func CopyResource(srcPath, dstPath string) error { +func CopyResource(srcPath, dstPath string, followLink bool) error { var ( srcInfo CopyInfo err error @@ -369,7 +365,7 @@ func CopyResource(srcPath, dstPath string) error { srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil { + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { return err } @@ -405,3 +401,58 @@ func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { return Untar(copyArchive, dstDir, options) } + +// ResolveHostSourcePath decides which real path needs to be copied, based on whether +// symlinks should be followed: if followLink is true, resolvedPath is the link target +// of any symlink file; otherwise only the symlinks of the parent directory are resolved +// and the symlink file itself is returned without being resolved. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not following the symlink, resolve only the symlinks of the parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, and +// returns the completed resolved path and the rebase name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name.
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_test.go b/vendor/github.com/docker/docker/pkg/archive/copy_test.go deleted file mode 100644 index 25f1811a..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_test.go +++ /dev/null @@ -1,625 +0,0 @@ -package archive - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" -) - -func removeAllPaths(paths ...string) { - for _, path := range paths { - os.RemoveAll(path) - } -} - -func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { - var err error - - if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - return -} - -func isNotDir(err error) bool { - return strings.Contains(err.Error(), "not a directory") -} - -func joinTrailingSep(pathElements ...string) string { - joined := filepath.Join(pathElements...) - - return fmt.Sprintf("%s%c", joined, filepath.Separator) -} - -func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { - t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) - - fileA, err := os.Open(filenameA) - if err != nil { - return - } - defer fileA.Close() - - fileB, err := os.Open(filenameB) - if err != nil { - return - } - defer fileB.Close() - - hasher := sha256.New() - - if _, err = io.Copy(hasher, fileA); err != nil { - return - } - - hashA := hasher.Sum(nil) - hasher.Reset() - - if _, err = io.Copy(hasher, fileB); err != nil { - return - } - - hashB := hasher.Sum(nil) - - if !bytes.Equal(hashA, hashB) { - err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) - } - - return -} - -func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { - t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) - - var changes []Change - - if changes, err = ChangesDirs(newDir, oldDir); err != nil { - return - } - - if len(changes) != 0 { - err = fmt.Errorf("expected no changes between directories, but got: %v", changes) - } - - return -} - -func logDirContents(t *testing.T, dirPath string) { - logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { - if err != nil { - t.Errorf("stat error for path %q: %s", path, err) - return nil - } - - if info.IsDir() { - path = joinTrailingSep(path) - } - - t.Logf("\t%s", path) - - return nil - }) - - t.Logf("logging directory contents: %q", dirPath) - - if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { - t.Fatal(err) - } -} - -func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { - t.Logf("copying from %q to %q", srcPath, dstPath) - - return CopyResource(srcPath, dstPath) -} - -// Basic assumptions about SRC and DST: -// 1. SRC must exist. -// 2. If SRC ends with a trailing separator, it must be a directory. -// 3. DST parent directory must exist. -// 4. If DST exists as a file, it must not end with a trailing separator. - -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. 
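Reviewer note: the copy.go hunk above changes the public API, so it is worth pausing here before the deleted tests continue. `CopyResource` now takes a `followLink` flag that is threaded through `CopyInfoSourcePath` into the new `ResolveHostSourcePath`. A minimal usage sketch, assuming only the signatures shown in this diff (the paths are hypothetical):

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// followLink=true resolves a symlink source to its target before
	// archiving; followLink=false resolves only the parent directories
	// and copies the symlink itself.
	if err := archive.CopyResource("/tmp/src-link", "/tmp/dst", true); err != nil {
		log.Fatal(err)
	}
}
```

Note that the deleted copy_test.go below still calls the old two-argument form via `testCopyHelper`, which is consistent with these tests being removed in this change.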
-func TestCopyErrSrcNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1")); !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func TestCopyErrSrcNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1")); !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Test for error when SRC is a valid file or directory, -// but the DST parent directory does not exist. -func TestCopyErrDstParentNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - // Try with a file source. - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a file whose parent does not exist. - if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a directory whose parent does not exist. - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func TestCopyErrDstNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - // Try with a file source. - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } - - // Try with a directory source. 
- srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Possibilities are reduced to the remaining 10 cases: -// -// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action -// =================================================================================================== -// A | no | - | no | - | no | create file -// B | no | - | no | - | yes | error -// C | no | - | yes | no | - | overwrite file -// D | no | - | yes | yes | - | create file in dst dir -// E | yes | no | no | - | - | create dir, copy contents -// F | yes | no | yes | no | - | error -// G | yes | no | yes | yes | - | copy dir and contents -// H | yes | yes | no | - | - | create dir, copy contents -// I | yes | yes | yes | no | - | error -// J | yes | yes | yes | yes | - | copy dir contents -// - -// A. SRC specifies a file and DST (no trailing path separator) doesn't -// exist. This should create a file with the name DST and copy the -// contents of the source file into it. -func TestCopyCaseA(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "itWorks.txt") - - var err error - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// B. SRC specifies a file and DST (with trailing path separator) doesn't -// exist. This should cause an error because the copy operation cannot -// create a directory when copying a single file. -func TestCopyCaseB(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := joinTrailingSep(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcPath, dstDir); err == nil { - t.Fatal("expected ErrDirNotExists error, but got nil instead") - } - - if err != ErrDirNotExists { - t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) - } -} - -// C. SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func TestCopyCaseC(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "file2") - - var err error - - // Ensure they start out different. - if err = fileContentsEqual(t, srcPath, dstPath); err == nil { - t.Fatal("expected different file contents") - } - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// D. 
SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseD(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := filepath.Join(tmpDirB, "dir1") - dstPath := filepath.Join(dstDir, "file1") - - var err error - - // Ensure that dstPath doesn't exist. - if _, err = os.Stat(dstPath); !os.IsNotExist(err) { - t.Fatalf("did not expect dstPath %q to exist", dstPath) - } - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir1") - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// E. SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func TestCopyCaseE(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// F. SRC specifies a directory and DST exists as a file. This should cause an -// error as it is not possible to overwrite a file with a directory. -func TestCopyCaseF(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// G. SRC specifies a directory and DST exists as a directory. 
This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func TestCopyCaseG(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir2") - resultDir := filepath.Join(dstDir, "dir1") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir2") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// H. SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseH(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } -} - -// I. SRC specifies a directory's contents only and DST exists as a file. This -// should cause an error as it is not possible to overwrite a file with a -// directory. -func TestCopyCaseI(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// J. SRC specifies a directory's contents only and DST exists as a directory. 
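Reviewer note: cases H–J rely on the `/.` suffix convention, where a source ending in a path separator plus `.` copies the directory's contents rather than the directory itself (case J's description continues below). A sketch under the same assumptions as the previous example, with hypothetical paths:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// "/tmp/src/dir1/." copies the contents of dir1 into /tmp/dst;
	// "/tmp/src/dir1" would copy dir1 itself into /tmp/dst.
	if err := archive.CopyResource("/tmp/src/dir1/.", "/tmp/dst", false); err != nil {
		log.Fatal(err)
	}
}
```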
-// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func TestCopyCaseJ(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "dir5") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir5") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/docker/docker/pkg/archive/diff_test.go deleted file mode 100644 index 2b29992d..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_test.go +++ /dev/null @@ -1,370 +0,0 @@ -package archive - -import ( - "archive/tar" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/docker/docker/pkg/ioutils" -) - -func TestApplyLayerInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestApplyLayerInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestApplyLayerWhiteouts(t *testing.T) { - wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") - if err != nil { - return - } - defer os.RemoveAll(wd) - - base := []string{ - ".baz", - "bar/", - "bar/bax", - "bar/bay/", - "baz", - "foo/", - "foo/.abc", - "foo/.bcd/", - "foo/.bcd/a", - "foo/cde/", - "foo/cde/def", - "foo/cde/efg", - "foo/fgh", - "foobar", - } - - type tcase struct { - change, expected []string - } - - tcases := []tcase{ - { - base, - base, - }, - { - []string{ - ".bay", - ".wh.baz", - "foo/", - "foo/.bce", - "foo/.wh..wh..opq", - "foo/cde/", - "foo/cde/efg", - }, - []string{ - ".bay", - ".baz", - "bar/", - "bar/bax", - "bar/bay/", - "foo/", - "foo/.bce", - "foo/cde/", - "foo/cde/efg", - "foobar", - }, - }, - { - []string{ - ".bay", - ".wh..baz", - ".wh.foobar", - "foo/", - "foo/.abc", - "foo/.wh.cde", - "bar/", - }, - []string{ - ".bay", - "bar/", - "bar/bax", - "bar/bay/", - "foo/", - "foo/.abc", - "foo/.bce", - }, - }, - { - []string{ - ".abc", - ".wh..wh..opq", - "foobar", - }, - []string{ - ".abc", - "foobar", - }, - }, - } - - for i, tc := range tcases { - l, err := makeTestLayer(tc.change) - if err != nil { - t.Fatal(err) - } - - _, err = UnpackLayer(wd, l, nil) - if err != nil { - t.Fatal(err) - } - err = l.Close() - if err != nil { - t.Fatal(err) - } - - paths, err := readDirContents(wd) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(tc.expected, paths) { - t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) - } - } - -} - -func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { - tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - for _, p := range paths { - if p[len(p)-1] == filepath.Separator { - if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { - return - } - } else { - if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { - return - } - } - } - archive, err := Tar(tmpDir, Uncompressed) - if err != nil { - return - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - os.RemoveAll(tmpDir) - return err - }), nil -} - -func readDirContents(root string) ([]string, error) { - var files []string - err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if path == root { - return nil - } - rel, err := filepath.Rel(root, path) - if err != nil { - return err - } - if info.IsDir() { - rel = rel + "/" - } - files = append(files, rel) - return nil - }) - if err != nil { - return nil, err - } - return files, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/docker/docker/pkg/archive/utils_test.go deleted file mode 100644 index 98719032..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/utils_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" -) - -var testUntarFns = map[string]func(string, io.Reader) error{ - "untar": func(dest string, r io.Reader) error { - return Untar(r, dest, nil) - }, - "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, Reader(r)) - return err - }, -} - -// testBreakout is a helper function that, within the provided `tmpdir` directory, -// creates a `victim` folder with a generated `hello` file in it. 
-// `untar` extracts to a directory named `dest`, the tar file created from `headers`. -// -// Here are the tested scenarios: -// - removed `victim` folder (write) -// - removed files from `victim` folder (write) -// - new files in `victim` folder (write) -// - modified files in `victim` folder (write) -// - file in `dest` with same content as `victim/hello` (read) -// -// When using testBreakout make sure you cover one of the scenarios listed above. -func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { - tmpdir, err := ioutil.TempDir("", tmpdir) - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - dest := filepath.Join(tmpdir, "dest") - if err := os.Mkdir(dest, 0755); err != nil { - return err - } - - victim := filepath.Join(tmpdir, "victim") - if err := os.Mkdir(victim, 0755); err != nil { - return err - } - hello := filepath.Join(victim, "hello") - helloData, err := time.Now().MarshalText() - if err != nil { - return err - } - if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { - return err - } - helloStat, err := os.Stat(hello) - if err != nil { - return err - } - - reader, writer := io.Pipe() - go func() { - t := tar.NewWriter(writer) - for _, hdr := range headers { - t.WriteHeader(hdr) - } - t.Close() - }() - - untar := testUntarFns[untarFn] - if untar == nil { - return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) - } - if err := untar(dest, reader); err != nil { - if _, ok := err.(breakoutError); !ok { - // If untar returns an error unrelated to an archive breakout, - // then consider this an unexpected error and abort. - return err - } - // Here, untar detected the breakout. - // Let's move on verifying that indeed there was no breakout. - fmt.Printf("breakoutError: %v\n", err) - } - - // Check victim folder - f, err := os.Open(victim) - if err != nil { - // codepath taken if victim folder was removed - return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) - } - defer f.Close() - - // Check contents of victim folder - // - // We are only interested in getting 2 files from the victim folder, because if all is well - // we expect only one result, the `hello` file. If there is a second result, it cannot - // hold the same name `hello` and we assume that a new file got created in the victim folder. - // That is enough to detect an archive breakout. - names, err := f.Readdirnames(2) - if err != nil { - // codepath taken if victim is not a folder - return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) - } - for _, name := range names { - if name != "hello" { - // codepath taken if new file was created in victim folder - return fmt.Errorf("archive breakout: new file %q", name) - } - } - - // Check victim/hello - f, err = os.Open(hello) - if err != nil { - // codepath taken if read permissions were removed - return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) - } - defer f.Close() - b, err := ioutil.ReadAll(f) - if err != nil { - return err - } - fi, err := f.Stat() - if err != nil { - return err - } - if helloStat.IsDir() != fi.IsDir() || - // TODO: cannot check for fi.ModTime() change - helloStat.Mode() != fi.Mode() || - helloStat.Size() != fi.Size() || - !bytes.Equal(helloData, b) { - // codepath taken if hello has been modified - return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. 
FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) - } - - // Check that nothing in dest/ has the same content as victim/hello. - // Since victim/hello was generated with time.Now(), it is safe to assume - // that any file whose content matches exactly victim/hello, managed somehow - // to access victim/hello. - return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - if err != nil { - // skip directory if error - return filepath.SkipDir - } - // enter directory - return nil - } - if err != nil { - // skip file if error - return nil - } - b, err := ioutil.ReadFile(path) - if err != nil { - // Houston, we have a problem. Aborting (space)walk. - return err - } - if bytes.Equal(helloData, b) { - return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) - } - return nil - }) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go index 3d9c3132..d20478a1 100644 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -9,7 +9,7 @@ package archive const WhiteoutPrefix = ".wh." // WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for remoing an actaul file. Normally these files are excluded from exported +// for removing an actual file. Normally these files are excluded from exported // archives. const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go deleted file mode 100644 index 46ab3669..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io" - "testing" -) - -func TestGenerateEmptyFile(t *testing.T) { - archive, err := Generate("emptyFile") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"emptyFile", ""}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} - -func TestGenerateWithContent(t *testing.T) { - archive, err := Generate("file", "content") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"file", "content"}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ 
- } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go new file mode 100644 index 00000000..fc82c46b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/api.go @@ -0,0 +1,54 @@ +package authorization + +const ( + // AuthZApiRequest is the url for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the url for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestUri holds the full HTTP uri (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte `json:"ResponseBody,omitempty"` + + // ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents authZ plugin response +type Response struct { + // Allow indicates whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go new file mode 100644 index 00000000..f7039086 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/authz.go @@ -0,0 +1,165 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB + +// NewCtx creates a new authZ context; it is used to store authorization information related to a specific docker +// REST http session. +// A context provides two methods: +// Authenticate Request: +// Call authZ plugins with current REST request and AuthN response +// Request contains full HTTP packet sent to the docker daemon +// 
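Reviewer note: the Request/Response structs in api.go above define the JSON wire format exchanged with an authz plugin (the NewCtx comment continues below). A sketch of the decision payload a plugin returns, using a local mirror of the struct and an illustrative deny message:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Response mirrors authorization.Response from api.go above.
type Response struct {
	Allow bool   `json:"Allow"`
	Msg   string `json:"Msg,omitempty"`
	Err   string `json:"Err,omitempty"`
}

func main() {
	deny := Response{Allow: false, Msg: "bind mounts are not permitted"}
	b, err := json.Marshal(deny)
	if err != nil {
		log.Fatal(err)
	}
	// Prints: {"Allow":false,"Msg":"bind mounts are not permitted"}
	fmt.Println(string(b))
}
```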
https://docs.docker.com/reference/api/docker_remote_api/ +// +// Authenticate Response: +// Call authZ plugins with full info about current REST request, REST response and AuthN response +// The response from this method may contain content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order +// is determined according to daemon parameters +func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { + return &Ctx{ + plugins: authZPlugins, + user: user, + userAuthNMethod: userAuthNMethod, + requestMethod: requestMethod, + requestURI: requestURI, + } +} + +// Ctx stores a single request-response interaction context +type Ctx struct { + user string + userAuthNMethod string + requestMethod string + requestURI string + plugins []Plugin + // authReq stores the cached request object for the current transaction + authReq *Request +} + +// AuthZRequest authorizes the request to the docker daemon using authZ plugins +func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { + var body []byte + if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { + var err error + body, r.Body, err = drainBody(r.Body) + if err != nil { + return err + } + } + + var h bytes.Buffer + if err := r.Header.Write(&h); err != nil { + return err + } + + ctx.authReq = &Request{ + User: ctx.user, + UserAuthNMethod: ctx.userAuthNMethod, + RequestMethod: ctx.requestMethod, + RequestURI: ctx.requestURI, + RequestBody: body, + RequestHeaders: headers(r.Header), + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZRequest(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) + } + } + + return nil +} + +// AuthZResponse authorizes and manipulates the response from docker daemon using authZ plugins +func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { + ctx.authReq.ResponseStatusCode = rm.StatusCode() + ctx.authReq.ResponseHeaders = headers(rm.Header()) + + if sendBody(ctx.requestURI, rm.Header()) { + ctx.authReq.ResponseBody = rm.RawBody() + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZResponse(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) + } + } + + rm.FlushAll() + + return nil +} + +// drainBody dumps the body (if its length is less than 1MB) without modifying the request state +func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { + bufReader := bufio.NewReaderSize(body, maxBodySize) + newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + data, err := bufReader.Peek(maxBodySize) + // Body size exceeds max body size + if err == nil { + logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + return nil, newBody, nil + } + // Body size is less than maximum 
size + if err == io.EOF { + return data, newBody, nil + } + // Unknown error + return nil, newBody, err +} + +// sendBody returns true when request/response body should be sent to AuthZPlugin +func sendBody(url string, header http.Header) bool { + // Skip body for auth endpoint + if strings.HasSuffix(url, "/auth") { + return false + } + + // body is sent only for text or json messages + return header.Get("Content-Type") == "application/json" +} + +// headers returns flatten version of the http headers excluding authorization +func headers(header http.Header) map[string]string { + v := make(map[string]string, 0) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + for _, val := range values { + v[k] = val + } + } + return v +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go new file mode 100644 index 00000000..1b65ac0a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/plugin.go @@ -0,0 +1,83 @@ +package authorization + +import "github.com/docker/docker/pkg/plugins" + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorize the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorize the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// NewPlugins constructs and initialize the authorization plugins based on plugin names +func NewPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Plugin + name string +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Client.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Client.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initialize the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + if a.plugin == nil { + var err error + a.plugin, err = plugins.Get(a.name, AuthZApiImplements) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go new file mode 100644 index 00000000..245a0ef7 --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/authorization/response.go @@ -0,0 +1,203 @@ +package authorization + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "github.com/Sirupsen/logrus" + "net" + "net/http" +) + +// ResponseModifier allows authorization plugins to read and modify the content of the http.response +type ResponseModifier interface { + http.ResponseWriter + http.Flusher + http.CloseNotifier + + // RawBody returns the current http content + RawBody() []byte + + // RawHeaders returns the current content of the http headers + RawHeaders() ([]byte, error) + + // StatusCode returns the current status code + StatusCode() int + + // OverrideBody replace the body of the HTTP reply + OverrideBody(b []byte) + + // OverrideHeader replace the headers of the HTTP reply + OverrideHeader(b []byte) error + + // OverrideStatusCode replaces the status code of the HTTP reply + OverrideStatusCode(statusCode int) + + // Flush flushes all data to the HTTP response + FlushAll() error + + // Hijacked indicates the response has been hijacked by the Docker daemon + Hijacked() bool +} + +// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content +func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { + return &responseModifier{rw: rw, header: make(http.Header)} +} + +// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore +// the http request/response from docker daemon +type responseModifier struct { + // The original response writer + rw http.ResponseWriter + + r *http.Request + + status int + // body holds the response body + body []byte + // header holds the response header + header http.Header + // statusCode holds the response status code + statusCode int + // hijacked indicates the request has been hijacked + hijacked bool +} + +func (rm *responseModifier) Hijacked() bool { + return rm.hijacked +} + +// WriterHeader stores the http status code +func (rm *responseModifier) WriteHeader(s int) { + + // Use original request if hijacked + if rm.hijacked { + rm.rw.WriteHeader(s) + return + } + + rm.statusCode = s +} + +// Header returns the internal http header +func (rm *responseModifier) Header() http.Header { + + // Use original header if hijacked + if rm.hijacked { + return rm.rw.Header() + } + + return rm.header +} + +// Header returns the internal http header +func (rm *responseModifier) StatusCode() int { + return rm.statusCode +} + +// Override replace the body of the HTTP reply +func (rm *responseModifier) OverrideBody(b []byte) { + rm.body = b +} + +func (rm *responseModifier) OverrideStatusCode(statusCode int) { + rm.statusCode = statusCode +} + +// Override replace the headers of the HTTP reply +func (rm *responseModifier) OverrideHeader(b []byte) error { + header := http.Header{} + if err := json.Unmarshal(b, &header); err != nil { + return err + } + rm.header = header + return nil +} + +// Write stores the byte array inside content +func (rm *responseModifier) Write(b []byte) (int, error) { + + if rm.hijacked { + return rm.rw.Write(b) + } + + rm.body = append(rm.body, b...) 
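+ // The body is buffered here rather than written through, so that authZ
+ // plugins can inspect or replace it via RawBody/OverrideBody before
+ // FlushAll copies it to the underlying ResponseWriter.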
+ return len(b), nil +} + +// Body returns the response body +func (rm *responseModifier) RawBody() []byte { + return rm.body +} + +func (rm *responseModifier) RawHeaders() ([]byte, error) { + var b bytes.Buffer + if err := rm.header.Write(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Hijack returns the internal connection of the wrapped http.ResponseWriter +func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { + + rm.hijacked = true + rm.FlushAll() + + hijacker, ok := rm.rw.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter +func (rm *responseModifier) CloseNotify() <-chan bool { + closeNotifier, ok := rm.rw.(http.CloseNotifier) + if !ok { + logrus.Errorf("Internal response writer doesn't support the CloseNotifier interface") + return nil + } + return closeNotifier.CloseNotify() +} + +// Flush uses the internal flush API of the wrapped http.ResponseWriter +func (rm *responseModifier) Flush() { + flusher, ok := rm.rw.(http.Flusher) + if !ok { + logrus.Errorf("Internal response writer doesn't support the Flusher interface") + return + } + + rm.FlushAll() + flusher.Flush() +} + +// FlushAll flushes all data to the HTTP response +func (rm *responseModifier) FlushAll() error { + // Copy the status code + if rm.statusCode > 0 { + rm.rw.WriteHeader(rm.statusCode) + } + + // Copy the header + for k, vv := range rm.header { + for _, v := range vv { + rm.rw.Header().Add(k, v) + } + } + + var err error + if len(rm.body) > 0 { + // Write body + _, err = rm.rw.Write(rm.body) + } + + // Clean previous data + rm.body = nil + rm.statusCode = 0 + rm.header = http.Header{} + return err +} diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go new file mode 100644 index 00000000..784d65d6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster + +import ( + "io" + "sync" +) + +// Unbuffered accumulates multiple io.WriteCloser by stream. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. 
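+//
+// A minimal usage sketch (illustrative only; dst1 and dst2 stand for any
+// io.WriteCloser, e.g. files or network connections):
+//
+//	var w broadcaster.Unbuffered
+//	w.Add(dst1)
+//	w.Add(dst2)
+//	io.Copy(&w, strings.NewReader("hello")) // failed writers are evicted
+//	w.Clean()                               // close and drop all writers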
+func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 00000000..a7814f5b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
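+//
+// Illustrative only (the paths here are hypothetical):
+//
+//	if err := chrootarchive.UntarPath("/tmp/layer.tar", "/var/lib/rootfs"); err != nil {
+//		log.Fatal(err)
+//	}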
+func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 00000000..9b268566 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,94 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + // read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshal the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + // write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output) + } + return nil +}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 00000000..94131a6e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,19 @@ +package chrootarchive + +import "github.com/docker/docker/pkg/archive" + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer.
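+//
+// A hedged sketch of typical use (layerStream is a placeholder for any
+// archive.Reader, such as an open layer tarball):
+//
+//	size, err := chrootarchive.ApplyLayer("/var/lib/rootfs", layerStream)
+//	if err != nil {
+//		return err
+//	}
+//	logrus.Debugf("applied layer of %d bytes", size)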
+func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 00000000..a4adb74d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,120 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir = "" + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. 
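+	// The re-exec'd child prints exactly one JSON object on success,
+	// e.g. {"layerSize":12345}, so a single Decode from outBuf suffices.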
+ response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000..4f637f17 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go new file mode 100644 index 00000000..23befae6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. +package filenotify + +import "gopkg.in/fsnotify.v1" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go new file mode 100644 index 00000000..42038835 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify + +import "gopkg.in/fsnotify.v1" + +// fsNotify wraps the fsnotify package to satisfy the FileNotifer interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// GetEvents returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// GetErrors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go new file mode 100644 index 00000000..52610853 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -0,0 
+1,204 @@ +package filenotify + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "gopkg.in/fsnotify.v1" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchWatch is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("poller does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added, the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped and removed, and no new watches can be added +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes; upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed; if there is any error, `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 08b9840c..c1e309fe 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -6,7 +6,9 @@ import ( "io" "os" "path/filepath" + "regexp" "strings" + "text/scanner" "github.com/Sirupsen/logrus" ) @@ -50,7 +52,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { if exclusion(pattern) { pattern = pattern[1:] } - patternDirs = append(patternDirs, strings.Split(pattern, "/")) + patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) } return cleanedPatterns, patternDirs, exceptions, nil @@ -76,13 +78,14 @@ func Matches(file string, patterns []string) (bool, error) { // OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. // It will assume that the inputs have been preprocessed and therefore the function -// doen't need to do as much error checking and clean-up. This was done to avoid +// doesn't need to do as much error checking and clean-up. This was done to avoid // repeating these steps on each file being checked during the archive process. // The more generic fileutils.Matches() can't make these assumptions.
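+// For example, with the regexp-based matching below, a "**" pattern crosses
+// directory boundaries where plain filepath.Match would not (illustrative,
+// assuming '/' as the path separator):
+//
+//	ok, _ := fileutils.Matches("a/b/main.go", []string{"**/*.go"})
+//	// ok == true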
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { matched := false + file = filepath.FromSlash(file) parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, "/") + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) for i, pattern := range patterns { negative := false @@ -92,16 +95,16 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, pattern = pattern[1:] } - match, err := filepath.Match(pattern, file) + match, err := regexpMatch(pattern, file) if err != nil { - return false, err + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) } if !match && parentPath != "." { // Check to see if the pattern matches one of our parent dirs. if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = filepath.Match(strings.Join(patDirs[i], "/"), - strings.Join(parentPathDirs[:len(patDirs[i])], "/")) + match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), + strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) } } @@ -117,6 +120,102 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, return matched, nil } +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff because +// we're using regexp, but that's ok - it does no harm. +// +// As per the comment in golang's filepath.Match, on Windows, escaping +// is disabled. Instead, '\\' is treated as path separator. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent, + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since it's really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if strings.Index(".$", string(ch)) != -1 { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char.
Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + res, err := regexp.MatchString(regStr, path) + + // Map regexp's error to filepath's so no one knows we're not using filepath + if err != nil { + err = filepath.ErrBadPattern + } + + return res, err +} + // CopyFile copies from src to dst until either EOF is reached // on src or an error occurs. It verifies src exists and remove // the dst if it exists. diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go deleted file mode 100644 index b544ffbf..00000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package fileutils - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" -) - -// CopyFile with invalid src -func TestCopyFileWithInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with invalid dest -func TestCopyFileWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "file") - err = ioutil.WriteFile(src, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with same src and dest -func TestCopyFileWithSameSrcAndDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - file := path.Join(tempFolder, "file") - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, file) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -// CopyFile with same src and dest but path is different and not clean -func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - testFolder := path.Join(tempFolder, "test") - err = os.MkdirAll(testFolder, 0740) - if err != nil { - t.Fatal(err) - } - file := path.Join(testFolder, "file") - sameFile := testFolder + "/../test/file" - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, sameFile) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -func TestCopyFile(t *testing.T) { - tempFolder, err := 
ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "src") - dest := path.Join(tempFolder, "dest") - ioutil.WriteFile(src, []byte("content"), 0777) - ioutil.WriteFile(dest, []byte("destContent"), 0777) - bytes, err := CopyFile(src, dest) - if err != nil { - t.Fatal(err) - } - if bytes != 7 { - t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) - } - actual, err := ioutil.ReadFile(dest) - if err != nil { - t.Fatal(err) - } - if string(actual) != "content" { - t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") - } -} - -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -func TestWildcardMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*"}) - if match != true { - t.Errorf("failed to get a wildcard match, got %v", match) - } -} - -// A simple pattern match should return true. -func TestPatternMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go"}) - if match != true { - t.Errorf("failed to get a match, got %v", match) - } -} - -// An exclusion followed by an inclusion should return true. 
-func TestExclusionPatternMatchesPatternBefore(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) - if match != true { - t.Errorf("failed to get true match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A pattern followed by an exclusion should return false. -func TestExclusionPatternMatchesPatternAfter(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) - if match != false { - t.Errorf("failed to get false match on exclusion pattern, got %v", match) - } -} - -// A filename evaluating to . should return false. -func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { - match, _ := Matches(".", []string{"*.go"}) - if match != false { - t.Errorf("failed to get false match on ., got %v", match) - } -} - -// A single ! pattern should return an error. -func TestSingleExclamationError(t *testing.T) { - _, err := Matches("fileutils.go", []string{"!"}) - if err == nil { - t.Errorf("failed to get an error for a single exclamation point, got %v", err) - } -} - -// A string preceded with a ! should return true from Exclusion. -func TestExclusion(t *testing.T) { - exclusion := exclusion("!") - if !exclusion { - t.Errorf("failed to get true for a single !, got %v", exclusion) - } -} - -// Matches with no patterns -func TestMatchesWithNoPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{}) - if err != nil { - t.Fatal(err) - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// Matches with malformed patterns -func TestMatchesWithMalformedPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{"["}) - if err == nil { - t.Fatal("Should have failed because of a malformed syntax in the pattern") - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// An empty string should return true from Empty. 
-func TestEmpty(t *testing.T) { - empty := empty("") - if !empty { - t.Errorf("failed to get true for an empty string, got %v", empty) - } -} - -func TestCleanPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsStripEmptyPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsExceptionFlag(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsErrorSingleException(t *testing.T) { - _, _, _, err := CleanPatterns([]string{"!"}) - if err == nil { - t.Errorf("expected error on single exclamation point, got %v", err) - } -} - -func TestCleanPatternsFolderSplit(t *testing.T) { - _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) - if dirs[0][0] != "docs" { - t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) - } - if dirs[0][1] != "config" { - t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) - } -} - -func TestCreateIfNotExistsDir(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - folderToCreate := filepath.Join(tempFolder, "tocreate") - - if err := CreateIfNotExists(folderToCreate, true); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(folderToCreate) - if err != nil { - t.Fatalf("Should have create a folder, got %v", err) - } - - if !fileinfo.IsDir() { - t.Fatalf("Should have been a dir, seems it's not") - } -} - -func TestCreateIfNotExistsFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - fileToCreate := filepath.Join(tempFolder, "file/to/create") - - if err := CreateIfNotExists(fileToCreate, false); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(fileToCreate) - if err != nil { - t.Fatalf("Should have create a file, got %v", err) - } - - if fileinfo.IsDir() { - t.Fatalf("Should have been a file, seems it's not") - } -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cac..00000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. 
-func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/utils/git.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go similarity index 92% rename from vendor/github.com/docker/docker/utils/git.go rename to vendor/github.com/docker/docker/pkg/gitutils/gitutils.go index 4d0bb164..b4afcf91 100644 --- a/vendor/github.com/docker/docker/utils/git.go +++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go @@ -1,4 +1,4 @@ -package utils +package gitutils import ( "fmt" @@ -6,7 +6,7 @@ import ( "net/http" "net/url" "os" - "os/exec" + "github.com/docker/containerd/subreaper/exec" "path/filepath" "strings" @@ -14,9 +14,9 @@ import ( "github.com/docker/docker/pkg/urlutil" ) -// GitClone clones a repository into a newly created directory which +// Clone clones a repository into a newly created directory which // will be under "docker-build-git" -func GitClone(remoteURL string) (string, error) { +func Clone(remoteURL string) (string, error) { if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go deleted file mode 100644 index 7a95cb2b..00000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir - -import ( - "path/filepath" - "testing" -) - -func TestGet(t *testing.T) { - home := Get() - if home == "" { - t.Fatal("returned home directory is empty") - } - - if !filepath.IsAbs(home) { - t.Fatalf("returned path is not absolute: %s", home) - } -} - -func TestGetShortcutString(t *testing.T) { - shortcut := GetShortcutString() - if shortcut == "" { - t.Fatal("returned shortcut string is empty") - } -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go deleted file mode 100644 index d35d0821..00000000 --- a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package httputils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestDownload(t *testing.T) { - expected := "Hello, docker !" 
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, expected) - })) - defer ts.Close() - response, err := Download(ts.URL) - if err != nil { - t.Fatal(err) - } - - actual, err := ioutil.ReadAll(response.Body) - response.Body.Close() - - if err != nil || string(actual) != expected { - t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) - } -} - -func TestDownload400Errors(t *testing.T) { - expectedError := "Got HTTP status code >= 400: 403 Forbidden" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, "something failed (forbidden)", http.StatusForbidden) - })) - defer ts.Close() - // Expected status code = 403 - if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { - t.Fatalf("Expected the the error %q, got %v", expectedError, err) - } -} - -func TestDownloadOtherErrors(t *testing.T) { - if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { - t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) - } -} - -func TestNewHTTPRequestError(t *testing.T) { - errorMessage := "Some error message" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, errorMessage, http.StatusForbidden) - })) - defer ts.Close() - httpResponse, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { - t.Fatalf("Expected err to be %q, got %v", errorMessage, err) - } -} - -func TestParseServerHeader(t *testing.T) { - inputs := map[string][]string{ - "bad header": {"error"}, - "(bad header)": {"error"}, - "(without/spaces)": {"error"}, - "(header/with spaces)": {"error"}, - "foo/bar (baz)": {"foo", "bar", "baz"}, - "foo/bar": {"error"}, - "foo": {"error"}, - "foo/bar (baz space)": {"foo", "bar", "baz space"}, - " f f / b b ( b s ) ": {"f f", "b b", "b s"}, - "foo/bar (baz) ignore": {"foo", "bar", "baz"}, - "foo/bar ()": {"error"}, - "foo/bar()": {"error"}, - "foo/bar(baz)": {"foo", "bar", "baz"}, - "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, - "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, - "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, - } - - for header, values := range inputs { - serverHeader, err := ParseServerHeader(header) - if err != nil { - if err != errInvalidHeader { - t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) - } - if values[0] == "error" { - continue - } - t.Fatalf("Header %q failed to parse when it shouldn't have", header) - } - if values[0] == "error" { - t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) - } - - if serverHeader.App != values[0] { - t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) - } - - if serverHeader.Ver != values[1] { - t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) - } - - if serverHeader.OS != values[2] { - t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) - } - - } - -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go deleted file mode 100644 index 9de433ee..00000000 --- 
a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package httputils - -import ( - "testing" -) - -func TestDetectContentType(t *testing.T) { - input := []byte("That is just a plain text") - - if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { - t.Errorf("TestDetectContentType failed") - } -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go deleted file mode 100644 index e9d05783..00000000 --- a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestResumableRequestHeaderSimpleErrors(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "Hello, world !") - })) - defer ts.Close() - - client := &http.Client{} - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - expectedError := "client and request can't be nil\n" - resreq := &resumableRequestReader{} - _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - - resreq = &resumableRequestReader{ - client: client, - request: req, - totalSize: -1, - } - expectedError = "failed to auto detect content length" - _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - -} - -// Not too much failures, bails out after some wait -func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { - client := &http.Client{} - - var badReq *http.Request - badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } - - resreq := &resumableRequestReader{ - client: client, - request: badReq, - failures: 0, - maxFailures: 2, - } - read, err := resreq.Read([]byte{}) - if err != nil || read != 0 { - t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) - } -} - -// Too much failures, returns the error -func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { - client := &http.Client{} - - var badReq *http.Request - badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } - - resreq := &resumableRequestReader{ - client: client, - request: badReq, - failures: 0, - maxFailures: 1, - } - defer resreq.Close() - - expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` - read, err := resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError || read != 0 { - t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) - } -} - -type errorReaderCloser struct{} - -func (errorReaderCloser) Close() error { return nil } - -func (errorReaderCloser) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("A error occured") -} - -// If a an unknown error is encountered, return 0, nil and log it -func TestResumableRequestReaderWithReadError(t *testing.T) { - var req *http.Request - req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - - response := &http.Response{ - Status: "500 Internal Server", - StatusCode: 500, - ContentLength: 
0, - Close: true, - Body: errorReaderCloser{}, - } - - resreq := &resumableRequestReader{ - client: client, - request: req, - currentResponse: response, - lastRange: 1, - totalSize: 1, - } - defer resreq.Close() - - buf := make([]byte, 1) - read, err := resreq.Read(buf) - if err != nil { - t.Fatal(err) - } - - if read != 0 { - t.Fatalf("Expected to have read nothing, but read %v", read) - } -} - -func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { - var req *http.Request - req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - - response := &http.Response{ - Status: "416 Requested Range Not Satisfiable", - StatusCode: 416, - ContentLength: 0, - Close: true, - Body: ioutil.NopCloser(strings.NewReader("")), - } - - resreq := &resumableRequestReader{ - client: client, - request: req, - currentResponse: response, - lastRange: 1, - totalSize: 1, - } - defer resreq.Close() - - buf := make([]byte, 1) - _, err = resreq.Read(buf) - if err == nil || err != io.EOF { - t.Fatalf("Expected an io.EOF error, got %v", err) - } -} - -func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Range") == "" { - t.Fatalf("Expected a Range HTTP header, got nothing") - } - })) - defer ts.Close() - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - - resreq := &resumableRequestReader{ - client: client, - request: req, - lastRange: 1, - } - defer resreq.Close() - - buf := make([]byte, 2) - _, err = resreq.Read(buf) - if err == nil || err.Error() != "the server doesn't support byte ranges" { - t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) - } -} - -func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { - - srvtxt := "some response text data" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, srvtxt) - })) - defer ts.Close() - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - retries := uint32(5) - - resreq := ResumableRequestReader(client, req, retries, 0) - defer resreq.Close() - - data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } - - resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } -} - -func TestResumableRequestReader(t *testing.T) { - - srvtxt := "some response text data" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, srvtxt) - })) - defer ts.Close() - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - retries := uint32(5) - imgSize := int64(len(srvtxt)) - - resreq := ResumableRequestReader(client, req, retries, imgSize) - defer resreq.Close() - - data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } - - resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } -} - -func TestResumableRequestReaderWithInitialResponse(t *testing.T) { - - srvtxt := "some response text data" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, srvtxt) - })) - defer ts.Close() - - 
var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - retries := uint32(5) - imgSize := int64(len(srvtxt)) - - res, err := client.Do(req) - if err != nil { - t.Fatal(err) - } - - resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) - defer resreq.Close() - - data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } - - resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index a1301ee9..6bca4662 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -155,6 +155,9 @@ func parseSubgid(username string) (ranges, error) { return parseSubidFile(subgidFileName, username) } +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned func parseSubidFile(path, username string) (ranges, error) { var rangeList ranges @@ -171,15 +174,14 @@ func parseSubidFile(path, username string) (ranges, error) { } text := strings.TrimSpace(s.Text()) - if text == "" { + if text == "" || strings.HasPrefix(text, "#") { continue } parts := strings.Split(text, ":") if len(parts) != 3 { return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) } - if parts[0] == username { - // return the first entry for a user; ignores potential for multiple ranges per user + if parts[0] == username || username == "ALL" { startid, err := strconv.Atoi(parts[1]) if err != nil { return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go deleted file mode 100644 index 55b338c9..00000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go +++ /dev/null @@ -1,243 +0,0 @@ -// +build !windows - -package idtools - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -type node struct { - uid int - gid int -} - -func TestMkdirAllAs(t *testing.T) { - dirName, err := ioutil.TempDir("", "mkdirall") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - "usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} 
- verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should be chowned, but nothing else - if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - testTree["usr"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func TestMkdirAllNewAs(t *testing.T) { - - dirName, err := ioutil.TempDir("", "mkdirnew") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - "usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should NOT be chowned - if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func TestMkdirAs(t *testing.T) { - - dirName, err := ioutil.TempDir("", "mkdir") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - } - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should just chown to the requested uid/gid - if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // create a subdir under a dir which doesn't exist--should fail - if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { - t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") - } - - // create a subdir under an existing dir; should only change the ownership of the new subdir - if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - testTree["usr/bin"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func buildTree(base string, tree 
map[string]node) error { - for path, node := range tree { - fullPath := filepath.Join(base, path) - if err := os.MkdirAll(fullPath, 0755); err != nil { - return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) - } - if err := os.Chown(fullPath, node.uid, node.gid); err != nil { - return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) - } - } - return nil -} - -func readTree(base, root string) (map[string]node, error) { - tree := make(map[string]node) - - dirInfos, err := ioutil.ReadDir(base) - if err != nil { - return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) - } - - for _, info := range dirInfos { - s := &syscall.Stat_t{} - if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { - return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) - } - tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} - if info.IsDir() { - // read the subdirectory - subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) - if err != nil { - return nil, err - } - for path, nodeinfo := range subtree { - tree[path] = nodeinfo - } - } - } - return tree, nil -} - -func compareTrees(left, right map[string]node) error { - if len(left) != len(right) { - return fmt.Errorf("Trees aren't the same size") - } - for path, nodeLeft := range left { - if nodeRight, ok := right[path]; ok { - if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { - // mismatch - return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, - nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) - } - continue - } - return fmt.Errorf("right tree didn't contain path %q", path) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index c9e3c937..00000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index c1eedff1..2bef12a5 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -2,33 +2,33 @@ package idtools import ( "fmt" - "os/exec" + "github.com/docker/containerd/subreaper/exec" "path/filepath" + "regexp" + "sort" + "strconv" "strings" - "syscall" ) // add a user and/or group to Linux /etc/passwd, /etc/group using standard // Linux distribution commands: -// adduser --uid --shell /bin/login --no-create-home --disabled-login --ingroup -// useradd -M -u -s /bin/nologin -N -g -// addgroup --gid -// groupadd -g - -const baseUID int = 10000 -const baseGID int = 10000 -const idMAX int = 65534 +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false var ( - userCommand string - groupCommand string + userCommand string cmdTemplates = map[string]string{ - "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", - "useradd": "-M -u %d -s /bin/false -N -g %s %s", - "addgroup": "--gid %d %s", - "groupadd": "-g %d %s", + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" ) func init() { @@ -38,11 +38,6 @@ func init() { } else if _, err := resolveBinary("useradd"); err == nil { userCommand = "useradd" } - if _, err := resolveBinary("addgroup"); err == nil { - groupCommand = "addgroup" - } else if _, err := resolveBinary("groupadd"); err == nil { - groupCommand = "groupadd" - } } func resolveBinary(binname string) (string, error) { @@ -62,94 +57,132 @@ func resolveBinary(binname string) (string, error) { return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) } -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -// This new user's /etc/sub{uid,gid} ranges will be used for user namespace +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace // mapping ranges in containers. 
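Before the rewritten implementation below, a minimal caller's-eye sketch of the new flow. This is hypothetical usage under assumptions: the user name "dockremap" and the standalone main are illustrative, and it is Linux-only (the file is usergroupadd_linux.go); only the AddNamespaceRangesUser signature and its (uid, gid, error) return come from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Creates a system user (via adduser/useradd), reads its uid/gid back
	// with `id`, and ensures subordinate uid/gid ranges exist for it.
	uid, gid, err := idtools.AddNamespaceRangesUser("dockremap") // name is illustrative
	if err != nil {
		log.Fatalf("setting up remap user: %v", err)
	}
	fmt.Printf("user-namespace root maps to %d:%d\n", uid, gid)
}
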
func AddNamespaceRangesUser(name string) (int, int, error) { - // Find unused uid, gid pair - uid, err := findUnusedUID(baseUID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) - } - gid, err := findUnusedGID(baseGID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) } - // First add the group that we will use - if err := addGroup(name, gid); err != nil { - return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) } - // Add the user as a member of the group - if err := addUser(name, uid, name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) } return uid, gid, nil } -func addUser(userName string, uid int, groupName string) error { +func addUser(userName string) error { if userCommand == "" { return fmt.Errorf("Cannot add user; no useradd/adduser binary found") } - args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) - return execAddCmd(userCommand, args) -} - -func addGroup(groupName string, gid int) error { - - if groupCommand == "" { - return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") - } - args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) - // only error out if the error isn't that the group already exists - // if the group exists then our needs are already met - if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { - return err - } - return nil -} - -func execAddCmd(cmd, args string) error { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
- out, err := execCmd.CombinedOutput() + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) if err != nil { - return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) } return nil } -func findUnusedUID(startUID int) (int, error) { - return findUnused("passwd", startUID) -} +func createSubordinateRanges(name string) error { -func findUnusedGID(startGID int) (int, error) { - return findUnused("group", startGID) -} - -func findUnused(file string, id int) (int, error) { - for { - cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) - cmd := exec.Command("sh", "-c", cmdStr) - if err := cmd.Run(); err != nil { - // if a non-zero return code occurs, then we know the ID was not found - // and is usable - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - if status.ExitStatus() == 1 { - //no match, we can use this ID - return id, nil - } - } - } - return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) } - id++ - if id > idMAX { - return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file) + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) } } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return 
true + } + return false +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index 932e1d1b..fcaecc37 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -1,16 +1,32 @@ package ioutils +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. const maxCap = 1e6 -// BytesPipe is io.ReadWriter which works similarly to pipe(queue). -// All written data could be read only once. Also BytesPipe is allocating -// and releasing new byte slices to adjust to current needs, so there won't be -// overgrown buffer after high load peak. -// BytesPipe isn't goroutine-safe, caller must synchronize it if needed. +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +// ErrClosed is returned when Write is called on a closed BytesPipe. +var ErrClosed = errors.New("write to closed BytesPipe") + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond buf [][]byte // slice of byte-slices of buffered data lastRead int // index in the first slice to a read point bufLen int // length of data buffered over the slices + closeErr error // error to return from next Read. set to nil if not closed. } // NewBytesPipe creates new BytesPipe, initialized by specified slice. @@ -20,15 +36,24 @@ func NewBytesPipe(buf []byte) *BytesPipe { if cap(buf) == 0 { buf = make([]byte, 0, 64) } - return &BytesPipe{ + bp := &BytesPipe{ buf: [][]byte{buf[:0]}, } + bp.wait = sync.NewCond(&bp.mu) + return bp } // Write writes p to BytesPipe. // It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (n int, err error) { +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + defer bp.mu.Unlock() + written := 0 +loop0: for { + if bp.closeErr != nil { + return written, ErrClosed + } // write data to the last buffer b := bp.buf[len(bp.buf)-1] // copy data to the current empty allocated area @@ -38,6 +63,8 @@ func (bp *BytesPipe) Write(p []byte) (n int, err error) { // include written data in last buffer bp.buf[len(bp.buf)-1] = b[:len(b)+n] + written += n + // if there was enough room to write all then break if len(p) == n { break @@ -45,15 +72,43 @@ func (bp *BytesPipe) Write(p []byte) (n int, err error) { // more data: write to the next slice p = p[n:] + + // block if too much data is still in the buffer + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + // allocate slice that has twice the size of the last unless maximum reached nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) - if maxCap < nextCap { + if nextCap > maxCap { nextCap = maxCap } // add new byte slice to the buffers slice and continue writing bp.buf = append(bp.buf, make([]byte, 0, nextCap)) } - return + bp.wait.Broadcast() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. 
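A small usage sketch of the now-synchronized pipe before the close methods below, assuming the package is imported from its vendored path: Write blocks once blockThreshold bytes sit unread in the buffer, and Close lets a blocked reader drain what remains and then observe io.EOF.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe(nil)
	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close() // readers drain what is buffered, then get io.EOF
	}()
	// ReadAll blocks on the pipe's condition variable until data or close arrives.
	data, err := ioutil.ReadAll(bp)
	fmt.Printf("%s %v\n", data, err) // "hello world <nil>"
}
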
+func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) } func (bp *BytesPipe) len() int { @@ -63,6 +118,17 @@ func (bp *BytesPipe) len() int { // Read reads bytes from BytesPipe. // Data could be read only once. func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.len() == 0 { + if bp.closeErr != nil { + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.len() == 0 && bp.closeErr != nil { + return 0, bp.closeErr + } + } for { read := copy(p, bp.buf[0][bp.lastRead:]) n += read @@ -85,5 +151,6 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.buf[0] = nil // throw away old slice bp.buf = bp.buf[1:] // switch to next } + bp.wait.Broadcast() return } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go deleted file mode 100644 index 62b1a186..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package ioutils - -import ( - "crypto/sha1" - "encoding/hex" - "testing" -) - -func TestBytesPipeRead(t *testing.T) { - buf := NewBytesPipe(nil) - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - rd := make([]byte, 4) - n, err := buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "1234" { - t.Fatalf("Read %s, but must be %s", rd, "1234") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "5678" { - t.Fatalf("Read %s, but must be %s", rd, "5679") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 2 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) - } - if string(rd[:n]) != "90" { - t.Fatalf("Read %s, but must be %s", rd, "90") - } -} - -func TestBytesPipeWrite(t *testing.T) { - buf := NewBytesPipe(nil) - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - if string(buf.buf[0]) != "1234567890" { - t.Fatalf("Buffer %s, must be %s", buf.buf, "1234567890") - } -} - -// Write and read in different speeds/chunk sizes and check valid data is read. 
-func TestBytesPipeWriteRandomChunks(t *testing.T) { - cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ - {100, 10, 1}, - {1000, 10, 5}, - {1000, 100, 0}, - {1000, 5, 6}, - {10000, 50, 25}, - } - - testMessage := []byte("this is a random string for testing") - // random slice sizes to read and write - writeChunks := []int{25, 35, 15, 20} - readChunks := []int{5, 45, 20, 25} - - for _, c := range cases { - // first pass: write directly to hash - hash := sha1.New() - for i := 0; i < c.iterations*c.writesPerLoop; i++ { - if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { - t.Fatal(err) - } - } - expected := hex.EncodeToString(hash.Sum(nil)) - - // write/read through buffer - buf := NewBytesPipe(nil) - hash.Reset() - for i := 0; i < c.iterations; i++ { - for w := 0; w < c.writesPerLoop; w++ { - buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) - } - for r := 0; r < c.readsPerLoop; r++ { - p := make([]byte, readChunks[(i*c.readsPerLoop+r)%len(readChunks)]) - n, _ := buf.Read(p) - hash.Write(p[:n]) - } - } - // read rest of the data from buffer - for i := 0; ; i++ { - p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) - n, _ := buf.Read(p) - if n == 0 { - break - } - hash.Write(p[:n]) - } - actual := hex.EncodeToString(hash.Sum(nil)) - - if expected != actual { - t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) - } - - } -} - -func BenchmarkBytesPipeWrite(b *testing.B) { - for i := 0; i < b.N; i++ { - buf := NewBytesPipe(nil) - for j := 0; j < 1000; j++ { - buf.Write([]byte("pretty short line, because why not?")) - } - } -} - -func BenchmarkBytesPipeRead(b *testing.B) { - rd := make([]byte, 1024) - for i := 0; i < b.N; i++ { - b.StopTimer() - buf := NewBytesPipe(nil) - for j := 0; j < 1000; j++ { - buf.Write(make([]byte, 1024)) - } - b.StartTimer() - for j := 0; j < 1000; j++ { - if n, _ := buf.Read(rd); n != 1024 { - b.Fatalf("Wrong number of bytes: %d", n) - } - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go deleted file mode 100644 index 89688632..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package ioutils - -import "testing" - -func TestFprintfIfNotEmpty(t *testing.T) { - wc := NewWriteCounter(&NopWriter{}) - n, _ := FprintfIfNotEmpty(wc, "foo%s", "") - - if wc.Count != 0 || n != 0 { - t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) - } - - n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") - if wc.Count != 6 || n != 6 { - t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n) - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go deleted file mode 100644 index de495b56..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -func TestMultiReadSeekerReadAll(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) - - b, err := ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - expected := "hello world 1hello world 2hello world 3" - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } - - size, err := mr.Seek(0, os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if size != expectedSize { - t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) - } - - // Reset the position and read again - pos, err := mr.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if pos != 0 { - t.Fatalf("expected position to be set to 0, got %d", pos) - } - - b, err = ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } -} - -func TestMultiReadSeekerReadEach(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - var totalBytes int64 - for i, s := range []*strings.Reader{s1, s2, s3} { - sLen := int64(s.Len()) - buf := make([]byte, s.Len()) - expected := []byte(fmt.Sprintf("%s %d", str, i+1)) - - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - - if !bytes.Equal(buf, expected) { - t.Fatalf("expected %q to be %q", string(buf), string(expected)) - } - - pos, err := mr.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("iteration: %d, error: %v", i+1, err) - } - - // check that the total bytes read is the current position of the seeker - totalBytes += sLen - if pos != totalBytes { - t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) - } - - // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well - newPos, err := mr.Seek(pos, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if newPos != pos { - t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) - } - } -} - -func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - buf := make([]byte, s1.Len()+3) - _, err := mr.Read(buf) - if err != nil { - t.Fatal(err) - } - - // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string - expected := "hello world 1hel" - if string(buf) != expected { - t.Fatalf("expected %s to be %s", string(buf), expected) - } -} - -func TestMultiReadSeekerNegativeSeek(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + 
" 3") - mr := MultiReadSeeker(s1, s2, s3) - - s1Len := s1.Len() - s2Len := s2.Len() - s3Len := s3.Len() - - s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if s != int64(s1Len+s2Len) { - t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) - } - - buf := make([]byte, s3Len) - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - expected := fmt.Sprintf("%s %d", str, 3) - if string(buf) != fmt.Sprintf("%s %d", str, 3) { - t.Fatalf("expected %q to be %q", string(buf), expected) - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index 54dd312b..e73b02bb 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -4,7 +4,8 @@ import ( "crypto/sha256" "encoding/hex" "io" - "sync" + + "golang.org/x/net/context" ) type readCloserWrapper struct { @@ -45,92 +46,6 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { } } -// bufReader allows the underlying reader to continue to produce -// output by pre-emptively reading from the wrapped reader. -// This is achieved by buffering this data in bufReader's -// expanding buffer. -type bufReader struct { - sync.Mutex - buf io.ReadWriter - reader io.Reader - err error - wait sync.Cond - drainBuf []byte -} - -// NewBufReader returns a new bufReader. -func NewBufReader(r io.Reader) io.ReadCloser { - reader := &bufReader{ - buf: NewBytesPipe(nil), - reader: r, - drainBuf: make([]byte, 1024), - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -// NewBufReaderWithDrainbufAndBuffer returns a BufReader with drainBuffer and buffer. -func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer io.ReadWriter) io.ReadCloser { - reader := &bufReader{ - buf: buffer, - drainBuf: drainBuffer, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func (r *bufReader) drain() { - for { - //Call to scheduler is made to yield from this goroutine. - //This avoids goroutine looping here when n=0,err=nil, fixes code hangs when run with GCC Go. - callSchedulerIfNecessary() - n, err := r.reader.Read(r.drainBuf) - r.Lock() - if err != nil { - r.err = err - } else { - if n == 0 { - // nothing written, no need to signal - r.Unlock() - continue - } - r.buf.Write(r.drainBuf[:n]) - } - r.wait.Signal() - r.Unlock() - if err != nil { - break - } - } -} - -func (r *bufReader) Read(p []byte) (n int, err error) { - r.Lock() - defer r.Unlock() - for { - n, err = r.buf.Read(p) - if n > 0 { - return n, err - } - if r.err != nil { - return 0, r.err - } - r.wait.Wait() - } -} - -// Close closes the bufReader -func (r *bufReader) Close() error { - closer, ok := r.reader.(io.ReadCloser) - if !ok { - return nil - } - return closer.Close() -} - // HashData returns the sha256 sum of src. func HashData(src io.Reader) (string, error) { h := sha256.New() @@ -168,3 +83,72 @@ func (r *OnEOFReader) runFunc() { r.Fn = nil } } + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. 
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go deleted file mode 100644 index 5c26a2a1..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" - "time" -) - -// Implement io.Reader -type errorReader struct{} - -func (r *errorReader) Read(p []byte) (int, error) { - return 0, fmt.Errorf("Error reader always fail.") -} - -func TestReadCloserWrapperClose(t *testing.T) { - reader := strings.NewReader("A string reader") - wrapper := NewReadCloserWrapper(reader, func() error { - return fmt.Errorf("This will be called when closing") - }) - err := wrapper.Close() - if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { - t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") - } -} - -func TestReaderErrWrapperReadOnError(t *testing.T) { - called := false - reader := &errorReader{} - wrapper := NewReaderErrWrapper(reader, func() { - called = true - }) - _, err := wrapper.Read([]byte{}) - if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { - t.Fatalf("readErrWrapper should returned an error") - } - if !called { - t.Fatalf("readErrWrapper should have call the anonymous function on failure") - } -} - -func TestReaderErrWrapperRead(t *testing.T) { - reader := strings.NewReader("a string reader.") - wrapper := NewReaderErrWrapper(reader, func() { - t.Fatalf("readErrWrapper should not have called the anonymous function") - }) - // Read 20 byte (should be ok with the string above) - num, err := wrapper.Read(make([]byte, 20)) - if err != nil { - t.Fatal(err) - } - if num != 16 { - t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) - } -} - -func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) { - reader, writer := io.Pipe() - - drainBuffer := make([]byte, 1024) - buffer := NewBytesPipe(nil) - bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, buffer) - - // Write everything down to a Pipe - // Usually, a pipe should block but because of the buffered 
reader, - // the writes will go through - done := make(chan bool) - go func() { - writer.Write([]byte("hello world")) - writer.Close() - done <- true - }() - - // Drain the reader *after* everything has been written, just to verify - // it is indeed buffering - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("timeout") - } - - output, err := ioutil.ReadAll(bufreader) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(output, []byte("hello world")) { - t.Error(string(output)) - } -} - -func TestBufReader(t *testing.T) { - reader, writer := io.Pipe() - bufreader := NewBufReader(reader) - - // Write everything down to a Pipe - // Usually, a pipe should block but because of the buffered reader, - // the writes will go through - done := make(chan bool) - go func() { - writer.Write([]byte("hello world")) - writer.Close() - done <- true - }() - - // Drain the reader *after* everything has been written, just to verify - // it is indeed buffering - <-done - output, err := ioutil.ReadAll(bufreader) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(output, []byte("hello world")) { - t.Error(string(output)) - } -} - -func TestBufReaderCloseWithNonReaderCloser(t *testing.T) { - reader := strings.NewReader("buffer") - bufreader := NewBufReader(reader) - - if err := bufreader.Close(); err != nil { - t.Fatal(err) - } - -} - -// implements io.ReadCloser -type simpleReaderCloser struct { - err error -} - -func (r *simpleReaderCloser) Read(p []byte) (n int, err error) { - return 0, r.err -} - -func (r *simpleReaderCloser) Close() error { - r.err = io.EOF - return nil -} - -func TestBufReaderCloseWithReaderCloser(t *testing.T) { - reader := &simpleReaderCloser{} - bufreader := NewBufReader(reader) - - err := bufreader.Close() - if err != nil { - t.Fatal(err) - } - -} - -func TestHashData(t *testing.T) { - reader := strings.NewReader("hash-me") - actual, err := HashData(reader) - if err != nil { - t.Fatal(err) - } - expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" - if actual != expected { - t.Fatalf("Expecting %s, got %s", expected, actual) - } -} - -type repeatedReader struct { - readCount int - maxReads int - data []byte -} - -func newRepeatedReader(max int, data []byte) *repeatedReader { - return &repeatedReader{0, max, data} -} - -func (r *repeatedReader) Read(p []byte) (int, error) { - if r.readCount >= r.maxReads { - return 0, io.EOF - } - r.readCount++ - n := copy(p, r.data) - return n, nil -} - -func testWithData(data []byte, reads int) { - reader := newRepeatedReader(reads, data) - bufReader := NewBufReader(reader) - io.Copy(ioutil.Discard, bufReader) -} - -func Benchmark1M10BytesReads(b *testing.B) { - reads := 1000000 - readSize := int64(10) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} - -func Benchmark1M1024BytesReads(b *testing.B) { - reads := 1000000 - readSize := int64(1024) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} - -func Benchmark10k32KBytesReads(b *testing.B) { - reads := 10000 - readSize := int64(32 * 1024) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 
100644 index c258e5fd..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package ioutils - -import ( - "io/ioutil" - - "github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go index 2b35a266..52a4901a 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -1,9 +1,7 @@ package ioutils import ( - "errors" "io" - "net/http" "sync" ) @@ -11,45 +9,43 @@ import ( // is a flush. In addition, the Close method can be called to intercept // Read/Write calls if the targets lifecycle has already ended. type WriteFlusher struct { - mu sync.Mutex - w io.Writer - flusher http.Flusher - flushed bool - closed error - - // TODO(stevvooe): Use channel for closed instead, remove mutex. Using a - // channel will allow one to properly order the operations. + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex } -var errWriteFlusherClosed = errors.New("writeflusher: closed") +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.mu.Lock() - defer wf.mu.Unlock() - if wf.closed != nil { - return 0, wf.closed + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: } n, err = wf.w.Write(b) - wf.flush() // every write is a flush. + wf.Flush() // every write is a flush. return n, err } // Flush the stream immediately. func (wf *WriteFlusher) Flush() { - wf.mu.Lock() - defer wf.mu.Unlock() - - wf.flush() -} - -// flush the stream immediately without taking a lock. Used internally. -func (wf *WriteFlusher) flush() { - if wf.closed != nil { + select { + case <-wf.closed: return + default: } - wf.flushed = true + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) wf.flusher.Flush() } @@ -59,34 +55,38 @@ func (wf *WriteFlusher) Flushed() bool { // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to // be used to detect whether or a response code has been issued or not. // Another hook should be used instead. - wf.mu.Lock() - defer wf.mu.Unlock() - - return wf.flushed + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed } // Close closes the write flusher, disallowing any further writes to the // target. After the flusher is closed, all calls to write or flush will // result in an error. func (wf *WriteFlusher) Close() error { - wf.mu.Lock() - defer wf.mu.Unlock() + wf.closeLock.Lock() + defer wf.closeLock.Unlock() - if wf.closed != nil { - return wf.closed + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) } - - wf.closed = errWriteFlusherClosed return nil } // NewWriteFlusher returns a new WriteFlusher. 
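NewWriteFlusher only gains real flushing when the wrapped writer exposes a Flush() method, as net/http response writers do; otherwise it falls back to the NopFlusher. A hedged sketch of the typical streaming-handler use follows (the /events route and tick payload are invented for illustration).

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		wf := ioutils.NewWriteFlusher(w) // w has Flush(), so every Write is pushed to the client
		defer wf.Close()
		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "tick %d\n", i)
			time.Sleep(time.Second)
		}
	})
	http.ListenAndServe(":8080", nil)
}
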
func NewWriteFlusher(w io.Writer) *WriteFlusher { - var flusher http.Flusher - if f, ok := w.(http.Flusher); ok { - flusher = f + var fl flusher + if f, ok := w.(flusher); ok { + fl = f } else { - flusher = &NopFlusher{} + fl = &NopFlusher{} } - return &WriteFlusher{w: w, flusher: flusher} + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go index 7a3249f3..ccc7f9c2 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -20,7 +20,7 @@ func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } -// NopFlusher represents a type which flush opetatin is nop. +// NopFlusher represents a type which flush operation is nop. type NopFlusher struct{} // Flush is a nop operation. diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go deleted file mode 100644 index 564b1cd4..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package ioutils - -import ( - "bytes" - "strings" - "testing" -) - -func TestWriteCloserWrapperClose(t *testing.T) { - called := false - writer := bytes.NewBuffer([]byte{}) - wrapper := NewWriteCloserWrapper(writer, func() error { - called = true - return nil - }) - if err := wrapper.Close(); err != nil { - t.Fatal(err) - } - if !called { - t.Fatalf("writeCloserWrapper should have call the anonymous function.") - } -} - -func TestNopWriteCloser(t *testing.T) { - writer := bytes.NewBuffer([]byte{}) - wrapper := NopWriteCloser(writer) - if err := wrapper.Close(); err != nil { - t.Fatal("NopWriteCloser always return nil on Close.") - } - -} - -func TestNopWriter(t *testing.T) { - nw := &NopWriter{} - l, err := nw.Write([]byte{'c'}) - if err != nil { - t.Fatal(err) - } - if l != 1 { - t.Fatalf("Expected 1 got %d", l) - } -} - -func TestWriteCounter(t *testing.T) { - dummy1 := "This is a dummy string." - dummy2 := "This is another dummy string." - totalLength := int64(len(dummy1) + len(dummy2)) - - reader1 := strings.NewReader(dummy1) - reader2 := strings.NewReader(dummy2) - - var buffer bytes.Buffer - wc := NewWriteCounter(&buffer) - - reader1.WriteTo(wc) - reader2.WriteTo(wc) - - if wc.Count != totalLength { - t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) - } - - if buffer.String() != dummy1+dummy2 { - t.Error("Wrong message written") - } -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go new file mode 100644 index 00000000..422e4bbd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go @@ -0,0 +1,40 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +// JSONLog represents a log message, typically a single entry from a given log stream. +// JSONLogs can be easily serialized to and from JSON and support custom formatting. 
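A quick sketch of the three Format modes documented on the method further down in this file, using an arbitrary fixed timestamp; only the field names and the Format semantics are taken from the code itself.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	jl := &jsonlog.JSONLog{
		Log:     "container started\n",
		Stream:  "stdout",
		Created: time.Date(2016, 6, 1, 12, 0, 0, 0, time.UTC),
	}
	raw, _ := jl.Format("")               // empty format: the bare message
	asJSON, _ := jl.Format("json")        // "json": the whole entry marshalled
	stamped, _ := jl.Format(time.RFC3339) // anything else: "<time> <message>"
	fmt.Print(raw, asJSON, "\n", stamped)
}
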
+type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` +} + +// Format returns the log formatted according to format +// If format is nil, returns the log message +// If format is json, returns the log marshaled in json format +// By default, returns the log with the log time formatted according to format. +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +// Reset resets the log to nil. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 00000000..83ce684a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,178 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. +// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = FastTimeMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjsonWriteJSONString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" +) + +// MarshalJSON marshals the JSONLog. +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. 
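The point of MarshalJSONBuf taking a caller-owned bytes.Buffer is allocation reuse on the hot logging path; a sketch of that pattern under assumptions: the helper package, entries slice, and writer below are placeholders, not part of this diff.

package jsonlogutil // illustrative helper package, not part of the diff

import (
	"bytes"
	"io"

	"github.com/docker/docker/pkg/jsonlog"
)

// writeAll streams entries to w, reusing a single buffer so the hot
// logging path does not re-allocate per entry as json.Marshal would.
func writeAll(w io.Writer, entries []*jsonlog.JSONLog) error {
	var buf bytes.Buffer
	for _, jl := range entries {
		buf.Reset()
		if err := jl.MarshalJSONBuf(&buf); err != nil {
			return err
		}
		buf.WriteByte('\n')
		if _, err := w.Write(buf.Bytes()); err != nil {
			return err
		}
	}
	return nil
}
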
+func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = FastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go new file mode 100644 index 00000000..df522c0d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go @@ -0,0 +1,122 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "unicode/utf8" +) + +// JSONLogs is based on JSONLog. +// It allows marshalling JSONLog from Log as []byte +// and an already marshalled Created timestamp. +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created string `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is based on the same method from JSONLog +// It has been modified to take into account the necessary changes. +func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + buf.WriteString(mj.Created) + buf.WriteString(`}`) + return nil +} + +// This is based on ffjsonWriteJSONBytesAsString. It has been changed +// to accept a string passed as a slice of bytes. 
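For the JSONLogs type above, a small sketch of supplying a pre-marshalled timestamp and raw attributes; the concrete values are invented, and note that Created must already carry its JSON quotes since it is written into the output verbatim.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	entry := &jsonlog.JSONLogs{
		Log:      []byte("ready\n"),
		Stream:   "stdout",
		Created:  `"2016-06-01T12:00:00Z"`, // already JSON-encoded, quotes included
		RawAttrs: json.RawMessage(`{"env":"prod"}`),
	}
	var buf bytes.Buffer
	if err := entry.MarshalJSONBuf(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// {"log":"ready\n","stream":"stdout","attrs":{"env":"prod"},"time":"2016-06-01T12:00:00Z"}
}
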
+func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/docker/docker/pkg/timeutils/json.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go similarity index 74% rename from vendor/github.com/docker/docker/pkg/timeutils/json.go rename to vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go index ea19daad..21173381 100644 --- a/vendor/github.com/docker/docker/pkg/timeutils/json.go +++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go @@ -1,5 +1,5 @@ -// Package timeutils provides helper functions to parse and print time (time.Time). -package timeutils +// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. +package jsonlog import ( "errors" @@ -15,9 +15,9 @@ const ( JSONFormat = `"` + time.RFC3339Nano + `"` ) -// FastMarshalJSON avoids one of the extra allocations that +// FastTimeMarshalJSON avoids one of the extra allocations that // time.MarshalJSON is making. -func FastMarshalJSON(t time.Time) (string, error) { +func FastTimeMarshalJSON(t time.Time) (string, error) { if y := t.Year(); y < 0 || y >= 10000 { // RFC 3339 is clear that years are 4 digits exactly. // See golang.org/issue/4556#c15 for more discussion. diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 451c6a9f..65cccbce 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -7,9 +7,9 @@ import ( "strings" "time" + "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/term" - "github.com/docker/docker/pkg/timeutils" - "github.com/docker/docker/pkg/units" + "github.com/docker/go-units" ) // JSONError wraps a concrete Code and Message, `Code` is @@ -60,7 +60,7 @@ func (p *JSONProgress) String() string { percentage = 50 } if width > 110 { - // this number can't be negetive gh#7136 + // this number can't be negative gh#7136 numSpaces := 0 if 50-percentage > 0 { numSpaces = 50 - percentage @@ -102,11 +102,13 @@ type JSONMessage struct { TimeNano int64 `json:"timeNano,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing. + Aux *json.RawMessage `json:"aux,omitempty"` } // Display displays the JSONMessage to `out`. 
`isTerminal` describes if `out` // is a terminal. If this is the case, it will erase the entire current line -// when dislaying the progressbar. +// when displaying the progressbar. func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { @@ -123,9 +125,9 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { return nil } if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(timeutils.RFC3339NanoFixed)) + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) } if jm.ID != "" { fmt.Fprintf(out, "%s: ", jm.ID) @@ -148,13 +150,13 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { // DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` // describes if `out` is a terminal. If this is the case, it will print `\n` at the end of // each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error { +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - diff = 0 + dec = json.NewDecoder(in) + ids = make(map[string]int) ) for { + diff := 0 var jm JSONMessage if err := dec.Decode(&jm); err != nil { if err == io.EOF { @@ -163,28 +165,51 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, return err } + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + if jm.Progress != nil { jm.Progress.terminalFd = terminalFd } if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { line, ok := ids[jm.ID] if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. line = len(ids) ids[jm.ID] = line if isTerminal { fmt.Fprintf(out, "\n") } - diff = 0 } else { diff = len(ids) - line } - if jm.ID != "" && isTerminal { + if isTerminal { + // NOTE: this appears to be necessary even if + // diff == 0. // [{diff}A = move cursor up diff rows fmt.Fprintf(out, "%c[%dA", 27, diff) } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). + ids = make(map[string]int) } err := jm.Display(out, isTerminal) if jm.ID != "" && isTerminal { + // NOTE: this appears to be necessary even if + // diff == 0. 
// [{diff}B = move cursor down diff rows fmt.Fprintf(out, "%c[%dB", 27, diff) } diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go deleted file mode 100644 index 7f46a8f6..00000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package jsonmessage - -import ( - "bytes" - "fmt" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/pkg/timeutils" -) - -func TestError(t *testing.T) { - je := JSONError{404, "Not found"} - if je.Error() != "Not found" { - t.Fatalf("Expected 'Not found' got '%s'", je.Error()) - } -} - -func TestProgress(t *testing.T) { - jp := JSONProgress{} - if jp.String() != "" { - t.Fatalf("Expected empty string, got '%s'", jp.String()) - } - - expected := " 1 B" - jp2 := JSONProgress{Current: 1} - if jp2.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp2.String()) - } - - expectedStart := "[==========> ] 20 B/100 B" - jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} - // Just look at the start of the string - // (the remaining time is really hard to test -_-) - if jp3.String()[:len(expectedStart)] != expectedStart { - t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) - } - - expected = "[=========================> ] 50 B/100 B" - jp4 := JSONProgress{Current: 50, Total: 100} - if jp4.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp4.String()) - } - - // this number can't be negative gh#7136 - expected = "[==================================================>] 50 B" - jp5 := JSONProgress{Current: 50, Total: 40} - if jp5.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp5.String()) - } -} - -func TestJSONMessageDisplay(t *testing.T) { - now := time.Now() - messages := map[JSONMessage][]string{ - // Empty - JSONMessage{}: {"\n", "\n"}, - // Status - JSONMessage{ - Status: "status", - }: { - "status\n", - "status\n", - }, - // General - JSONMessage{ - Time: now.Unix(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(timeutils.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(timeutils.RFC3339NanoFixed)), - }, - // General, with nano precision time - JSONMessage{ - TimeNano: now.UnixNano(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), - }, - // General, with both times Nano is preferred - JSONMessage{ - Time: now.Unix(), - TimeNano: now.UnixNano(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), - }, - // Stream over status - JSONMessage{ - Status: "status", - Stream: "stream", - }: { - "stream", - "stream", - }, - // With progress message - JSONMessage{ - Status: "status", - ProgressMessage: "progressMessage", - }: { - "status progressMessage", - "status progressMessage", - }, - // With progress, stream empty - JSONMessage{ - Status: "status", - Stream: "", - Progress: &JSONProgress{Current: 
1}, - }: { - "", - fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), - }, - } - - // The tests :) - for jsonMessage, expectedMessages := range messages { - // Without terminal - data := bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, false); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[0] { - t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) - } - // With terminal - data = bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, true); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[1] { - t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) - } - } -} - -// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. -func TestJSONMessageDisplayWithJSONError(t *testing.T) { - data := bytes.NewBuffer([]byte{}) - jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} - - err := jsonMessage.Display(data, true) - if err == nil || err.Error() != "Can't find it" { - t.Fatalf("Expected a JSONError 404, got [%v]", err) - } - - jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} - err = jsonMessage.Display(data, true) - if err == nil || err.Error() != "Authentication is required." { - t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) - } -} - -func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { - var ( - inFd uintptr - ) - data := bytes.NewBuffer([]byte{}) - reader := strings.NewReader("This is not a 'valid' JSON []") - inFd, _ = term.GetFdInfo(reader) - - if err := DisplayJSONMessagesStream(reader, data, inFd, false); err == nil && err.Error()[:17] != "invalid character" { - t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) - } -} - -func TestDisplayJSONMessagesStream(t *testing.T) { - var ( - inFd uintptr - ) - - messages := map[string][]string{ - // empty string - "": { - "", - ""}, - // Without progress & ID - "{ \"status\": \"status\" }": { - "status\n", - "status\n", - }, - // Without progress, with ID - "{ \"id\": \"ID\",\"status\": \"status\" }": { - "ID: status\n", - fmt.Sprintf("ID: status\n%c[%dB", 27, 0), - }, - // With progress - "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { - "ID: status ProgressMessage", - fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 0, 27, 0), - }, - // With progressDetail - "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { - "", // progressbar is disabled in non-terminal - fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 0, 27, 27, 0), - }, - } - for jsonMessage, expectedMessages := range messages { - data := bytes.NewBuffer([]byte{}) - reader := strings.NewReader(jsonMessage) - inFd, _ = term.GetFdInfo(reader) - - // Without terminal - if err := DisplayJSONMessagesStream(reader, data, inFd, false); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[0] { - t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) - } - - // With terminal - data = bytes.NewBuffer([]byte{}) - reader = strings.NewReader(jsonMessage) - if err := DisplayJSONMessagesStream(reader, data, inFd, true); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[1] { - t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String()) - } - } - -} diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md new file mode 100644 index 
00000000..e84a815c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state); this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since the name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go new file mode 100644 index 00000000..0b22ddfa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name.
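Before the lockCtr type that the comment above introduces, a small end-to-end sketch of the per-name locking and the cleanup behaviour this package implements (the lock name "image-pull" is illustrative):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/docker/docker/pkg/locker"
)

func main() {
	l := locker.New()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// All goroutines contend on the same name; the per-name
			// lock serializes them without holding the global mutex.
			l.Lock("image-pull")
			defer l.Unlock("image-pull")
			fmt.Println("holder", n)
		}(i)
	}
	wg.Wait()

	// Once nothing is waiting, the entry is deleted on Unlock,
	// so unlocking an unknown name reports ErrNoSuchLock.
	if err := l.Unlock("image-pull"); err == locker.ErrNoSuchLock {
		fmt.Println("lock was cleaned up:", err)
	}
}
```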
+type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff..00000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go deleted file mode 100644 index 01865eff..00000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package longpath - -import ( - "strings" - "testing" -) - -func TestStandardLongPath(t *testing.T) { - c := `C:\simple\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\C:\simple\path`) { - t.Errorf("Wrong long path returned. 
Original = %s ; Long = %s", c, longC) - } -} - -func TestUNCLongPath(t *testing.T) { - c := `\\server\share\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { - t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) - } -} diff --git a/vendor/github.com/docker/docker/pkg/mflag/LICENSE b/vendor/github.com/docker/docker/pkg/mflag/LICENSE index ac74d8f0..9b4f4a29 100644 --- a/vendor/github.com/docker/docker/pkg/mflag/LICENSE +++ b/vendor/github.com/docker/docker/pkg/mflag/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/docker/docker/pkg/mflag/flag.go b/vendor/github.com/docker/docker/pkg/mflag/flag.go index 43fd3051..e2a0c422 100644 --- a/vendor/github.com/docker/docker/pkg/mflag/flag.go +++ b/vendor/github.com/docker/docker/pkg/mflag/flag.go @@ -1,4 +1,4 @@ -// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. +// Copyright 2014-2016 The Docker & Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -520,6 +520,20 @@ func Set(name, value string) error { return CommandLine.Set(name, value) } +// isZeroValue guesses whether the string represents the zero +// value for a flag. It is not accurate but in practice works OK. +func isZeroValue(value string) bool { + switch value { + case "false": + return true + case "": + return true + case "0": + return true + } + return false +} + // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. func (fs *FlagSet) PrintDefaults() { @@ -537,7 +551,6 @@ func (fs *FlagSet) PrintDefaults() { } fs.VisitAll(func(flag *Flag) { - format := " -%s=%s" names := []string{} for _, name := range flag.Names { if name[0] != '#' { @@ -551,11 +564,14 @@ func (fs *FlagSet) PrintDefaults() { val = homedir.GetShortcutString() + val[len(home):] } - fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) - for i, line := range strings.Split(flag.Usage, "\n") { - if i != 0 { - line = " " + line - } + if isZeroValue(val) { + format := " -%s" + fmt.Fprintf(writer, format, strings.Join(names, ", -")) + } else { + format := " -%s=%s" + fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) + } + for _, line := range strings.Split(flag.Usage, "\n") { fmt.Fprintln(writer, "\t", line) } } @@ -1147,7 +1163,7 @@ func (fs *FlagSet) ReportError(str string, withHelp bool) { str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" } } - fmt.Fprintf(fs.Out(), "docker: %s.\n", str) + fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str) } // Parsed reports whether fs.Parse has been called. @@ -1207,11 +1223,27 @@ func (v mergeVal) IsBoolFlag() bool { return false } +// Name returns the name of a mergeVal. +// If the original value had a name, return the original name, +// otherwise, return the key assigned to this mergeVal.
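Before the Name method that this comment introduces, a short sketch of how Merge and the mergeVal wrappers are typically exercised. The flag names and values here are illustrative; in mflag, a name with a leading "-" is a long option invoked with "--" on the command line:

```go
package main

import (
	"fmt"
	"os"

	flag "github.com/docker/docker/pkg/mflag"
)

func main() {
	common := flag.NewFlagSet("common", flag.ContinueOnError)
	debug := common.Bool([]string{"D", "-debug"}, false, "Enable debug mode")

	cmd := flag.NewFlagSet("cmd", flag.ContinueOnError)
	name := cmd.String([]string{"-name"}, "", "Assign a name")

	// Merge copies common's flags into cmd, wrapping each value in a
	// mergeVal so that setting it through cmd updates the original.
	// The nil-map guards added above make this safe even for FlagSets
	// that have no flags defined yet.
	if err := flag.Merge(cmd, common); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	cmd.Parse([]string{"-D", "--name", "test"})
	fmt.Println(*debug, *name) // true test
}
```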
+func (v mergeVal) Name() string { + type namedValue interface { + Name() string + } + if nVal, ok := v.Value.(namedValue); ok { + return nVal.Name() + } + return v.key +} + // Merge is an helper function that merges n FlagSets into a single dest FlagSet // In case of name collision between the flagsets it will apply -// the destination FlagSet's errorHandling behaviour. +// the destination FlagSet's errorHandling behavior. func Merge(dest *FlagSet, flagsets ...*FlagSet) error { for _, fset := range flagsets { + if fset.formal == nil { + continue + } for k, f := range fset.formal { if _, ok := dest.formal[k]; ok { var err error @@ -1233,6 +1265,9 @@ func Merge(dest *FlagSet, flagsets ...*FlagSet) error { } newF := *f newF.Value = mergeVal{f.Value, k, fset} + if dest.formal == nil { + dest.formal = make(map[string]*Flag) + } dest.formal[k] = &newF } } diff --git a/vendor/github.com/docker/docker/pkg/mflag/flag_test.go b/vendor/github.com/docker/docker/pkg/mflag/flag_test.go deleted file mode 100644 index 85f32c8a..00000000 --- a/vendor/github.com/docker/docker/pkg/mflag/flag_test.go +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mflag - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - "testing" - "time" -) - -// ResetForTesting clears all flag state and sets the usage function as directed. -// After calling ResetForTesting, parse errors in flag handling will not -// exit the program. -func ResetForTesting(usage func()) { - CommandLine = NewFlagSet(os.Args[0], ContinueOnError) - Usage = usage -} -func boolString(s string) string { - if s == "0" { - return "false" - } - return "true" -} - -func TestEverything(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, false, "bool value") - Int([]string{"test_int"}, 0, "int value") - Int64([]string{"test_int64"}, 0, "int64 value") - Uint([]string{"test_uint"}, 0, "uint value") - Uint64([]string{"test_uint64"}, 0, "uint64 value") - String([]string{"test_string"}, "0", "string value") - Float64([]string{"test_float64"}, 0, "float64 value") - Duration([]string{"test_duration"}, 0, "time.Duration value") - - m := make(map[string]*Flag) - desired := "0" - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - m[name] = f - ok := false - switch { - case f.Value.String() == desired: - ok = true - case name == "test_bool" && f.Value.String() == boolString(desired): - ok = true - case name == "test_duration" && f.Value.String() == desired+"s": - ok = true - } - if !ok { - t.Error("Visit: bad value", f.Value.String(), "for", name) - } - } - } - } - VisitAll(visitor) - if len(m) != 8 { - t.Error("VisitAll misses some flags") - for k, v := range m { - t.Log(k, *v) - } - } - m = make(map[string]*Flag) - Visit(visitor) - if len(m) != 0 { - t.Errorf("Visit sees unset flags") - for k, v := range m { - t.Log(k, *v) - } - } - // Now set all flags - Set("test_bool", "true") - Set("test_int", "1") - Set("test_int64", "1") - Set("test_uint", "1") - Set("test_uint64", "1") - Set("test_string", "1") - Set("test_float64", "1") - Set("test_duration", "1s") - desired = "1" - Visit(visitor) - if len(m) != 8 { - t.Error("Visit fails after set") - for k, v := range m { - t.Log(k, *v) - } - } - // Now test they're visited in sort order. 
- var flagNames []string - Visit(func(f *Flag) { - for _, name := range f.Names { - flagNames = append(flagNames, name) - } - }) - if !sort.StringsAreSorted(flagNames) { - t.Errorf("flag names not sorted: %v", flagNames) - } -} - -func TestGet(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, true, "bool value") - Int([]string{"test_int"}, 1, "int value") - Int64([]string{"test_int64"}, 2, "int64 value") - Uint([]string{"test_uint"}, 3, "uint value") - Uint64([]string{"test_uint64"}, 4, "uint64 value") - String([]string{"test_string"}, "5", "string value") - Float64([]string{"test_float64"}, 6, "float64 value") - Duration([]string{"test_duration"}, 7, "time.Duration value") - - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - g, ok := f.Value.(Getter) - if !ok { - t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) - return - } - switch name { - case "test_bool": - ok = g.Get() == true - case "test_int": - ok = g.Get() == int(1) - case "test_int64": - ok = g.Get() == int64(2) - case "test_uint": - ok = g.Get() == uint(3) - case "test_uint64": - ok = g.Get() == uint64(4) - case "test_string": - ok = g.Get() == "5" - case "test_float64": - ok = g.Get() == float64(6) - case "test_duration": - ok = g.Get() == time.Duration(7) - } - if !ok { - t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) - } - } - } - } - VisitAll(visitor) -} - -func testParse(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolFlag := f.Bool([]string{"bool"}, false, "bool value") - bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") - f.Bool([]string{"bool3"}, false, "bool3 value") - bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") - intFlag := f.Int([]string{"-int"}, 0, "int value") - int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") - uintFlag := f.Uint([]string{"uint"}, 0, "uint value") - uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") - stringFlag := f.String([]string{"string"}, "0", "string value") - f.String([]string{"string2"}, "0", "string2 value") - singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") - doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") - mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") - mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") - nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") - nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") - float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") - durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") - extra := "one-extra-argument" - args := []string{ - "-bool", - "-bool2=true", - "-bool4=false", - "--int", "22", - "--int64", "0x23", - "-uint", "24", - "--uint64", "25", - "-string", "hello", - "-squote='single'", - `-dquote="double"`, - `-mquote='mixed"`, - `-mquote2="mixed2'`, - `-nquote="'single nested'"`, - `-nquote2='"double nested"'`, - "-float64", "2718e28", - "-duration", "2m", - extra, - } - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag != true { - t.Error("bool flag should be true, is ", *boolFlag) - } - if *bool2Flag != true { - t.Error("bool2 flag should be true, is ", *bool2Flag) - } - if !f.IsSet("bool2") { - t.Error("bool2 should be marked as set") - } - if 
f.IsSet("bool3") { - t.Error("bool3 should not be marked as set") - } - if !f.IsSet("bool4") { - t.Error("bool4 should be marked as set") - } - if *bool4Flag != false { - t.Error("bool4 flag should be false, is ", *bool4Flag) - } - if *intFlag != 22 { - t.Error("int flag should be 22, is ", *intFlag) - } - if *int64Flag != 0x23 { - t.Error("int64 flag should be 0x23, is ", *int64Flag) - } - if *uintFlag != 24 { - t.Error("uint flag should be 24, is ", *uintFlag) - } - if *uint64Flag != 25 { - t.Error("uint64 flag should be 25, is ", *uint64Flag) - } - if *stringFlag != "hello" { - t.Error("string flag should be `hello`, is ", *stringFlag) - } - if !f.IsSet("string") { - t.Error("string flag should be marked as set") - } - if f.IsSet("string2") { - t.Error("string2 flag should not be marked as set") - } - if *singleQuoteFlag != "single" { - t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) - } - if *doubleQuoteFlag != "double" { - t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) - } - if *mixedQuoteFlag != `'mixed"` { - t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) - } - if *mixed2QuoteFlag != `"mixed2'` { - t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) - } - if *nestedQuoteFlag != "'single nested'" { - t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) - } - if *nested2QuoteFlag != `"double nested"` { - t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) - } - if *float64Flag != 2718e28 { - t.Error("float64 flag should be 2718e28, is ", *float64Flag) - } - if *durationFlag != 2*time.Minute { - t.Error("duration flag should be 2m, is ", *durationFlag) - } - if len(f.Args()) != 1 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } -} - -func testPanic(f *FlagSet, t *testing.T) { - f.Int([]string{"-int"}, 0, "int value") - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - args := []string{ - "-int", "21", - } - f.Parse(args) -} - -func TestParsePanic(t *testing.T) { - ResetForTesting(func() {}) - testPanic(CommandLine, t) -} - -func TestParse(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParse(CommandLine, t) -} - -func TestFlagSetParse(t *testing.T) { - testParse(NewFlagSet("test", ContinueOnError), t) -} - -// Declare a user-defined flag type. -type flagVar []string - -func (f *flagVar) String() string { - return fmt.Sprint([]string(*f)) -} - -func (f *flagVar) Set(value string) error { - *f = append(*f, value) - return nil -} - -func TestUserDefined(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var v flagVar - flags.Var(&v, []string{"v"}, "usage") - if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { - t.Error(err) - } - if len(v) != 3 { - t.Fatal("expected 3 args; got ", len(v)) - } - expect := "[1 2 3]" - if v.String() != expect { - t.Errorf("expected value %q got %q", expect, v.String()) - } -} - -// Declare a user-defined boolean flag type. 
-type boolFlagVar struct { - count int -} - -func (b *boolFlagVar) String() string { - return fmt.Sprintf("%d", b.count) -} - -func (b *boolFlagVar) Set(value string) error { - if value == "true" { - b.count++ - } - return nil -} - -func (b *boolFlagVar) IsBoolFlag() bool { - return b.count < 4 -} - -func TestUserDefinedBool(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var b boolFlagVar - var err error - flags.Var(&b, []string{"b"}, "usage") - if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { - if b.count < 4 { - t.Error(err) - } - } - - if b.count != 4 { - t.Errorf("want: %d; got: %d", 4, b.count) - } - - if err == nil { - t.Error("expected error; got none") - } -} - -func TestSetOutput(t *testing.T) { - var flags FlagSet - var buf bytes.Buffer - flags.SetOutput(&buf) - flags.Init("test", ContinueOnError) - flags.Parse([]string{"-unknown"}) - if out := buf.String(); !strings.Contains(out, "-unknown") { - t.Logf("expected output mentioning unknown; got %q", out) - } -} - -// This tests that one can reset the flags. This still works but not well, and is -// superseded by FlagSet. -func TestChangingArgs(t *testing.T) { - ResetForTesting(func() { t.Fatal("bad parse") }) - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} - before := Bool([]string{"before"}, false, "") - if err := CommandLine.Parse(os.Args[1:]); err != nil { - t.Fatal(err) - } - cmd := Arg(0) - os.Args = Args() - after := Bool([]string{"after"}, false, "") - Parse() - args := Args() - - if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { - t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) - } -} - -// Test that -help invokes the usage message and returns ErrHelp. -func TestHelp(t *testing.T) { - var helpCalled = false - fs := NewFlagSet("help test", ContinueOnError) - fs.Usage = func() { helpCalled = true } - var flag bool - fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") - // Regular flag invocation should work - err := fs.Parse([]string{"-flag=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if !flag { - t.Error("flag was not set by -flag") - } - if helpCalled { - t.Error("help called for regular flag") - helpCalled = false // reset for next test - } - // Help flag should work as expected. - err = fs.Parse([]string{"-help"}) - if err == nil { - t.Fatal("error expected") - } - if err != ErrHelp { - t.Fatal("expected ErrHelp; got ", err) - } - if !helpCalled { - t.Fatal("help was not called") - } - // If we define a help flag, that should override. - var help bool - fs.BoolVar(&help, []string{"help"}, false, "help flag") - helpCalled = false - err = fs.Parse([]string{"-help"}) - if err != nil { - t.Fatal("expected no error for defined -help; got ", err) - } - if helpCalled { - t.Fatal("help was called; should not have been for defined help flag") - } -} - -// Test the flag count functions. 
-func TestFlagCounts(t *testing.T) { - fs := NewFlagSet("help test", ContinueOnError) - var flag bool - fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") - fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") - fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") - fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") - fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") - fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") - - if fs.FlagCount() != 6 { - t.Fatal("FlagCount wrong. ", fs.FlagCount()) - } - if fs.FlagCountUndeprecated() != 4 { - t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated()) - } - if fs.NFlag() != 0 { - t.Fatal("NFlag wrong. ", fs.NFlag()) - } - err := fs.Parse([]string{"-fd", "-g", "-flag4"}) - if err != nil { - t.Fatal("expected no error for defined -help; got ", err) - } - if fs.NFlag() != 4 { - t.Fatal("NFlag wrong. ", fs.NFlag()) - } -} - -// Show up bug in sortFlags -func TestSortFlags(t *testing.T) { - fs := NewFlagSet("help TestSortFlags", ContinueOnError) - - var err error - - var b bool - fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") - - err = fs.Parse([]string{"--banana=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - count := 0 - - fs.VisitAll(func(flag *Flag) { - count++ - if flag == nil { - t.Fatal("VisitAll should not return a nil flag") - } - }) - flagcount := fs.FlagCount() - if flagcount != count { - t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) - } - // Make sure its idempotent - if flagcount != fs.FlagCount() { - t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) - } - - count = 0 - fs.Visit(func(flag *Flag) { - count++ - if flag == nil { - t.Fatal("Visit should not return a nil flag") - } - }) - nflag := fs.NFlag() - if nflag != count { - t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count) - } - if nflag != fs.NFlag() { - t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) - } -} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go index 17dbd7a6..d2fb1fb4 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -1,6 +1,7 @@ package mount import ( + "fmt" "strings" ) @@ -67,3 +68,25 @@ func parseOptions(options string) (int, string) { } return flag, strings.Join(data, ",") } + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + validFlags := map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, + } + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go index 2f9f5c58..dc696dce 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -23,7 +23,7 @@ const ( SYNCHRONOUS = syscall.MS_SYNCHRONOUS // DIRSYNC will force all directory updates within the file system to be done - // synchronously. 
This affects the following system calls: creat, link, + // synchronously. This affects the following system calls: create, link, // unlink, symlink, mkdir, rmdir, mknod and rename. DIRSYNC = syscall.MS_DIRSYNC diff --git a/vendor/github.com/docker/docker/pkg/mount/mount_test.go b/vendor/github.com/docker/docker/pkg/mount/mount_test.go deleted file mode 100644 index 5c7f1b86..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/mount_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package mount - -import ( - "os" - "path" - "testing" -) - -func TestMountOptionsParsing(t *testing.T) { - options := "noatime,ro,size=10k" - - flag, data := parseOptions(options) - - if data != "size=10k" { - t.Fatalf("Expected size=10 got %s", data) - } - - expectedFlag := NOATIME | RDONLY - - if flag != expectedFlag { - t.Fatalf("Expected %d got %d", expectedFlag, flag) - } -} - -func TestMounted(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - sourcePath = path.Join(sourceDir, "file.txt") - targetPath = path.Join(targetDir, "file.txt") - ) - - os.Mkdir(sourceDir, 0777) - os.Mkdir(targetDir, 0777) - - f, err := os.Create(sourcePath) - if err != nil { - t.Fatal(err) - } - f.WriteString("hello") - f.Close() - - f, err = os.Create(targetPath) - if err != nil { - t.Fatal(err) - } - f.Close() - - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - mounted, err := Mounted(targetDir) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatalf("Expected %s to be mounted", targetDir) - } - if _, err := os.Stat(targetDir); err != nil { - t.Fatal(err) - } -} - -func TestMountReadonly(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - sourcePath = path.Join(sourceDir, "file.txt") - targetPath = path.Join(targetDir, "file.txt") - ) - - os.Mkdir(sourceDir, 0777) - os.Mkdir(targetDir, 0777) - - f, err := os.Create(sourcePath) - if err != nil { - t.Fatal(err) - } - f.WriteString("hello") - f.Close() - - f, err = os.Create(targetPath) - if err != nil { - t.Fatal(err) - } - f.Close() - - if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) - if err == nil { - t.Fatal("Should not be able to open a ro file as rw") - } -} - -func TestGetMounts(t *testing.T) { - mounts, err := GetMounts() - if err != nil { - t.Fatal(err) - } - - root := false - for _, entry := range mounts { - if entry.Mountpoint == "/" { - root = true - } - } - - if !root { - t.Fatal("/ should be mounted at least") - } -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go deleted file mode 100644 index 812d12e8..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go +++ /dev/null @@ -1,477 +0,0 @@ -// +build linux - -package mount - -import ( - "bytes" - "testing" -) - -const ( - fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime 
shared:5 - proc proc rw - 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel - 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 - 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw - 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw - 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel - 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 - 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 - 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 - 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd - 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw - 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children - 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children - 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children - 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children - 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children - 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children - 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children - 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children - 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children - 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered - 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct - 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel - 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel - 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel - 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw - 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw - 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw - 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw - 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered - 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered - 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered - 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered - 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 - 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw - 165 
38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered - 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered - 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered - 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered - 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered - 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered - 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered - 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered - 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered - 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered - 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered - 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered - 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered - 215 35 253:27 / 
/var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered - 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered - 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered - 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered - 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered - 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered - 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered - 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered - 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered - 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` - - ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 -20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered -21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 -22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw 
-25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children -27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu -29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw -31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct -32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory -33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices -34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer -35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio -36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event -37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb -38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd -39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 -40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 -41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 -42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 -43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 -44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 -45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 -46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 -47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 -48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 -49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 -50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 -51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 -52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 -53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 -54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 -55 20 0:48 / 
/var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 -56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 -57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 -58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 -59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 -60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 -61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 -62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 -63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 -64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 -65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 -66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 -67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 -68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 -69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 -70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 -71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 -72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 -73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 -74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 -75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 -76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 -77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 -78 20 0:71 / 
/var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 -79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 -80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 -81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 -82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 -83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 -84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 -85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 -86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 -87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 -88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 -89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 -90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 -91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 -92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 -93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 -94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 -95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 -96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 -97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 -98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 -99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 -100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 -101 20 0:94 / 
/var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 -102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 -103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 -104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 -105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 -106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 -107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 -108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 -109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 -110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 -111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 -112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 -113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 -114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 -115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 -116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 -117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 -118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 -119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 -120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 -121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 -122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 -123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 -124 20 0:117 / 
/var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 -125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 -126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 -127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 -128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 -129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 -130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 -131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 -132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 -133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 -134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 -135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 -136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 -137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 -138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 -139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 -140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 -141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 -142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 -143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 -144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` - - gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 -18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 -19 18 
0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw -20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw -22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw -24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 -25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc -26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children -27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children -28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children -29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children -30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children -31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children -32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children -33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro -34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota -35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw -36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw -42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw -43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw -44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 -68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c -85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - 
aufs none rw,si=9b4a7642b584939c -39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c -40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c -41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c -45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c -46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c -47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c -48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c -49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c -50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c -51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c -52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c -53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c -54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c -55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c -56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c -57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c -59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c -60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c -61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c -62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c -63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c -64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c -65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c 
-66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c -70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c -71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c -72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c -73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c -76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c -77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c -78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c -79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c -80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c -81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c -82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c -83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c -84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c -94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c -95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c -96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c -97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c -98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c -102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c -103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c -104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c -105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c -106 15 0:3439 / 
/var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c -107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c -108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c -109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c -110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c -111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c -112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c -113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c -114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c -117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c -118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c -119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c -120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c -121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c -122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c -123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c -126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c -127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c -128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c -130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c -131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c -132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c -133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c -134 15 0:3467 / 
/var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c -135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c -136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c -137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c -138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c -139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c -140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c -141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c -142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c -143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c -144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c -147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c -150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c -151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c -152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c -153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c -154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c -155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c -156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c -157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c -158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c -159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c -160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c -162 15 0:3495 / 
/var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c -163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c -164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c -165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c -166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c -167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c -168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c -169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c -170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c -171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c -172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c -173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c -174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c -184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c -187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c -188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c -189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c -190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c -191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c -192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c -193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c -194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c -195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c -196 15 0:3518 / 
/var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c -197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c -198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c -199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c -200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c -201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c -202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c -203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c -204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c -205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c -206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c -207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c -208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c -209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c -210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c -211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c -212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c -213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c -214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c -215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c -216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c -217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c -218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c -219 15 0:3541 / 
/var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c -220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c -221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c -222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c -223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c -224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c -225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c -226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c -227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c -228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c -229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c -230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c -231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c -232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c -233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c -234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c -235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c -237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c -238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c -239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c -240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c -241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c -242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c -243 15 0:3565 / 
/var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c -244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c -245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c -246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c -247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c -249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c -250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c -251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c -252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c -253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c -254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c -255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c -256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c -257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c -259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c -260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c -261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c -262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c -263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c -264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c -58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c -67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c -265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c -270 15 0:3615 / 
/var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c -273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c -278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c -281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c -286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c -289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c -99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` -) - -func TestParseFedoraMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(fedoraMountinfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseUbuntuMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(ubuntuMountInfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseGentooMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(gentooMountinfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseFedoraMountinfoFields(t *testing.T) { - r := bytes.NewBuffer([]byte(fedoraMountinfo)) - infos, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } - expectedLength := 58 - if len(infos) != expectedLength { - t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) - } - mi := Info{ - ID: 15, - Parent: 35, - Major: 0, - Minor: 3, - Root: "/", - Mountpoint: "/proc", - Opts: "rw,nosuid,nodev,noexec,relatime", - Optional: "shared:5", - Fstype: "proc", - Source: "proc", - VfsOpts: "rw", - } - - if *infos[0] != mi { - t.Fatalf("expected %#v, got %#v", mi, infos[0]) - } -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go index 8245f01d..b8d9aa5c 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd freebsd,!cgo +// +build !windows,!linux,!freebsd freebsd,!cgo package mount diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 47303bbc..8ceec84b 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -61,8 +61,7 @@ func ensureMountedAs(mountPoint, options string) error { return err } } - mounted, err = Mounted(mountPoint) - if err != nil { + if _, err = Mounted(mountPoint); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go deleted file mode 100644 index 4a8d22f0..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go +++ /dev/null @@ -1,331 +0,0 @@ -// +build linux - -package mount - -import ( - "os" - "path" - "syscall" - "testing" -) - -// nothing is propagated in or 
out -func TestSubtreePrivate(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outside1Dir = path.Join(tmp, "outside1") - outside2Dir = path.Join(tmp, "outside2") - - outside1Path = path.Join(outside1Dir, "file.txt") - outside2Path = path.Join(outside2Dir, "file.txt") - outside1CheckPath = path.Join(targetDir, "a", "file.txt") - outside2CheckPath = path.Join(sourceDir, "b", "file.txt") - ) - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside1Dir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside2Dir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outside1Path); err != nil { - t.Fatal(err) - } - if err := createFile(outside2Path); err != nil { - t.Fatal(err) - } - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // next, make the target private - if err := MakePrivate(targetDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the _source_ - if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(sourceDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _target_ - if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside1CheckPath) - } - - // next mount outside2Dir into the _target_ - if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "b")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _source_ - if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside2CheckPath) - } -} - -// Testing that when a target is a shared mount, -// then child mounts propagate to the source -func TestSubtreeShared(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outsideDir = path.Join(tmp, "outside") - - outsidePath = path.Join(outsideDir, "file.txt") - sourceCheckPath = path.Join(sourceDir, "a", "file.txt") - ) - - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outsideDir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outsidePath); err != nil { - t.Fatal(err) - } - - // mount the source as shared - if err := MakeShared(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := 
Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the target - if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // NOW, check that the file from the outside directory is available in the source directory - if _, err := os.Stat(sourceCheckPath); err != nil { - t.Fatal(err) - } -} - -// testing that mounts to a shared source show up in the slave target, -// and that mounts into a slave target do _not_ show up in the shared source -func TestSubtreeSharedSlave(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outside1Dir = path.Join(tmp, "outside1") - outside2Dir = path.Join(tmp, "outside2") - - outside1Path = path.Join(outside1Dir, "file.txt") - outside2Path = path.Join(outside2Dir, "file.txt") - outside1CheckPath = path.Join(targetDir, "a", "file.txt") - outside2CheckPath = path.Join(sourceDir, "b", "file.txt") - ) - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside1Dir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside2Dir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outside1Path); err != nil { - t.Fatal(err) - } - if err := createFile(outside2Path); err != nil { - t.Fatal(err) - } - - // mount the source as shared - if err := MakeShared(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // next, make the target slave - if err := MakeSlave(targetDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the _source_ - if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(sourceDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_ show in the _target_ - if _, err := os.Stat(outside1CheckPath); err != nil { - t.Fatal(err) - } - - // next mount outside2Dir into the _target_ - if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "b")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _source_ - if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside2CheckPath) - } -} - -func
TestSubtreeUnbindable(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - ) - if err := os.MkdirAll(sourceDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(targetDir, 0777); err != nil { - t.Fatal(err) - } - - // next, make the source unbindable - if err := MakeUnbindable(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // then attempt to mount it to target. It should fail - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not have been bindable", sourceDir) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() -} - -func createFile(path string) error { - f, err := os.Create(path) - if err != nil { - return err - } - f.WriteString("hello world!") - return f.Close() -} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 00000000..1764fc28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,524 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "agitated", + "amazing", + "angry", + "awesome", + "backstabbing", + "berserk", + "big", + "boring", + "clever", + "cocky", + "compassionate", + "condescending", + "cranky", + "desperate", + "determined", + "distracted", + "dreamy", + "drunk", + "ecstatic", + "elated", + "elegant", + "evil", + "fervent", + "focused", + "furious", + "gigantic", + "gloomy", + "goofy", + "grave", + "happy", + "high", + "hopeful", + "hungry", + "insane", + "jolly", + "jovial", + "kickass", + "lonely", + "loving", + "mad", + "modest", + "naughty", + "nauseous", + "nostalgic", + "pedantic", + "pensive", + "prickly", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "sick", + "silly", + "sleepy", + "small", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "tiny", + "trusting", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. 
https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American women to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement.
https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He won the Nobel Prize in Physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg was one of the designers and developers of the Smalltalk language. 
https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. 
https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. 
https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. 
https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. 
https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. 
https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/docker/docker/pkg/nat/nat_test.go b/vendor/github.com/docker/docker/pkg/nat/nat_test.go deleted file mode 100644 index 2c71142b..00000000 --- a/vendor/github.com/docker/docker/pkg/nat/nat_test.go +++ /dev/null @@ -1,525 +0,0 @@ -package nat - -import ( - "testing" -) - -func TestParsePort(t *testing.T) { - var ( - p int - err error - ) - - p, err = ParsePort("1234") - - if err != nil || p != 1234 { - t.Fatal("Parsing '1234' did not succeed") - } - - // FIXME currently this is a valid port. I don't think it should be. - // I'm leaving this test commented out until we make a decision. - // - erikh - - /* - p, err = ParsePort("0123") - - if err != nil { - t.Fatal("Successfully parsed port '0123' to '123'") - } - */ - - p, err = ParsePort("asdf") - - if err == nil || p != 0 { - t.Fatal("Parsing port 'asdf' succeeded") - } - - p, err = ParsePort("1asdf") - - if err == nil || p != 0 { - t.Fatal("Parsing port '1asdf' succeeded") - } -} - -func TestParsePortRange(t *testing.T) { - var ( - begin int - end int - err error - ) - - type TestRange struct { - Range string - Begin int - End int - } - validRanges := []TestRange{ - {"1234", 1234, 1234}, - {"1234-1234", 1234, 1234}, - {"1234-1235", 1234, 1235}, - {"8000-9000", 8000, 9000}, - {"0", 0, 0}, - {"0-0", 0, 0}, - } - - for _, r := range validRanges { - begin, end, err = ParsePortRange(r.Range) - - if err != nil || begin != r.Begin { - t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin) - } - if err != nil || end != r.End { - t.Fatalf("Parsing port range '%s' did not succeed. 
Expected end %d, got %d", r.Range, r.End, end) - } - } - - invalidRanges := []string{ - "asdf", - "1asdf", - "9000-8000", - "9000-", - "-8000", - "-8000-", - } - - for _, r := range invalidRanges { - begin, end, err = ParsePortRange(r) - - if err == nil || begin != 0 || end != 0 { - t.Fatalf("Parsing port range '%s' succeeded", r) - } - } -} - -func TestPort(t *testing.T) { - p, err := NewPort("tcp", "1234") - - if err != nil { - t.Fatalf("tcp, 1234 had a parsing issue: %v", err) - } - - if string(p) != "1234/tcp" { - t.Fatal("tcp, 1234 did not result in the string 1234/tcp") - } - - if p.Proto() != "tcp" { - t.Fatal("protocol was not tcp") - } - - if p.Port() != "1234" { - t.Fatal("port string value was not 1234") - } - - if p.Int() != 1234 { - t.Fatal("port int value was not 1234") - } - - p, err = NewPort("tcp", "asd1234") - if err == nil { - t.Fatal("tcp, asd1234 was supposed to fail") - } - - p, err = NewPort("tcp", "1234-1230") - if err == nil { - t.Fatal("tcp, 1234-1230 was supposed to fail") - } - - p, err = NewPort("tcp", "1234-1242") - if err != nil { - t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err) - } - - if string(p) != "1234-1242/tcp" { - t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp") - } -} - -func TestSplitProtoPort(t *testing.T) { - var ( - proto string - port string - ) - - proto, port = SplitProtoPort("1234/tcp") - - if proto != "tcp" || port != "1234" { - t.Fatal("Could not split 1234/tcp properly") - } - - proto, port = SplitProtoPort("") - - if proto != "" || port != "" { - t.Fatal("parsing an empty string yielded surprising results", proto, port) - } - - proto, port = SplitProtoPort("1234") - - if proto != "tcp" || port != "1234" { - t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) - } - - proto, port = SplitProtoPort("1234/") - - if proto != "tcp" || port != "1234" { - t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) - } - - proto, port = SplitProtoPort("/tcp") - - if proto != "" || port != "" { - t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) - } -} - -func TestParsePortSpecs(t *testing.T) { - var ( - portMap map[Port]struct{} - bindingMap map[Port][]PortBinding - err error - ) - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1234/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2345/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", portspec) - } - - if bindings[0].HostPort != "" { - t.Fatalf("HostPort should not be set for %s", portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1234/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2345/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", 
portspec) - } - - if bindings[0].HostPort != port { - t.Fatalf("HostPort should be %s for %s", port, portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1234/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2345/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "0.0.0.0" { - t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) - } - - if bindings[0].HostPort != port { - t.Fatalf("HostPort should be %s for %s", port, portspec) - } - } - - _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) - - if err == nil { - t.Fatal("Received no error while trying to parse a hostname instead of ip") - } -} - -func TestParsePortSpecsWithRange(t *testing.T) { - var ( - portMap map[Port]struct{} - bindingMap map[Port][]PortBinding - err error - ) - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1235/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2346/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", portspec) - } - - if bindings[0].HostPort != "" { - t.Fatalf("HostPort should not be set for %s", portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1235/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2346/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", portspec) - } - - if bindings[0].HostPort != port { - t.Fatalf("HostPort should be %s for %s", port, portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1235/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2346/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { - t.Fatalf("Expect single binding to port %s but found %s", port, bindings) - } - } - - _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) - - if err == nil { - t.Fatal("Received no error while trying to parse a hostname instead of ip") - } 
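
An editor's aside, not part of the diff: the nat tests being deleted here were the main working documentation of ParsePortSpecs. For reference, a minimal sketch of the behaviour they pinned down, assuming the vendored import path shown in the diff (illustrative only):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/nat"
)

func main() {
	// Specs take the form "[hostIP:][hostPort:]containerPort[/proto]";
	// the proto defaults to tcp, and the host part must be an IP literal.
	exposed, bindings, err := nat.ParsePortSpecs([]string{"0.0.0.0:8080:80/tcp"})
	if err != nil {
		panic(err)
	}
	for port := range exposed {
		// port stringifies as "80/tcp"; the binding carries the host side.
		b := bindings[port][0]
		fmt.Println(port, b.HostIP, b.HostPort) // 80/tcp 0.0.0.0 8080
	}

	// A hostname in place of an IP is rejected, as the deleted test asserted.
	if _, _, err := nat.ParsePortSpecs([]string{"localhost:1234:1234/tcp"}); err != nil {
		fmt.Println("rejected:", err)
	}
}
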
-} - -func TestParseNetworkOptsPrivateOnly(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIP != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublic(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "8080" { - t.Logf("Expected 8080 got %s", s.HostPort) - t.Fail() - } - if s.HostIP != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublicNoPort(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) - - if err == nil { - t.Logf("Expected error Invalid containerPort") - t.Fail() - } - if ports != nil { - t.Logf("Expected nil got %s", ports) - t.Fail() - } - if bindings != nil { - t.Logf("Expected nil got %s", bindings) - t.Fail() - } -} - -func TestParseNetworkOptsNegativePorts(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) - - if err == nil { - t.Fail() - } - if len(ports) != 0 { - t.Logf("Expected nil got %d", len(ports)) - t.Fail() - } - if len(bindings) != 0 { - t.Logf("Expected 0 got %d", len(bindings)) - t.Fail() - } -} - -func TestParseNetworkOptsUdp(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "udp" { - t.Logf("Expected udp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "6000" { - t.Logf("Expected 6000 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIP != "192.168.1.100" { - t.Fail() - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/nat/sort_test.go b/vendor/github.com/docker/docker/pkg/nat/sort_test.go deleted file mode 100644 index 88ed9111..00000000 --- a/vendor/github.com/docker/docker/pkg/nat/sort_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package nat - -import ( - "fmt" - 
"reflect" - "testing" -) - -func TestSortUniquePorts(t *testing.T) { - ports := []Port{ - Port("6379/tcp"), - Port("22/tcp"), - } - - Sort(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "22/tcp" { - t.Log(fmt.Sprint(first)) - t.Fail() - } -} - -func TestSortSamePortWithDifferentProto(t *testing.T) { - ports := []Port{ - Port("8888/tcp"), - Port("8888/udp"), - Port("6379/tcp"), - Port("6379/udp"), - } - - Sort(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "6379/tcp" { - t.Fail() - } -} - -func TestSortPortMap(t *testing.T) { - ports := []Port{ - Port("22/tcp"), - Port("22/udp"), - Port("8000/tcp"), - Port("6379/tcp"), - Port("9999/tcp"), - } - - portMap := PortMap{ - Port("22/tcp"): []PortBinding{ - {}, - }, - Port("8000/tcp"): []PortBinding{ - {}, - }, - Port("6379/tcp"): []PortBinding{ - {}, - {HostIP: "0.0.0.0", HostPort: "32749"}, - }, - Port("9999/tcp"): []PortBinding{ - {HostIP: "0.0.0.0", HostPort: "40000"}, - }, - } - - SortPortMap(ports, portMap) - if !reflect.DeepEqual(ports, []Port{ - Port("9999/tcp"), - Port("6379/tcp"), - Port("8000/tcp"), - Port("22/tcp"), - Port("22/udp"), - }) { - t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) - } - if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ - {HostIP: "0.0.0.0", HostPort: "32749"}, - {}, - }) { - t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) - } -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go deleted file mode 100644 index dc8c0e30..00000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build !windows - -package kernel - -import ( - "fmt" - "testing" -) - -func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { - var ( - a *VersionInfo - ) - a, _ = ParseRelease(release) - - if r := CompareKernelVersion(*a, *b); r != result { - t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) - } - if a.Flavor != b.Flavor { - t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor) - } -} - -// TestParseRelease tests the ParseRelease() function -func TestParseRelease(t *testing.T) { - assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) - assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) - assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) - assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) - assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) - // Errors - invalids := []string{ - "3", - "a", - "a.a", - "a.a.a-a", - } - for _, invalid := range invalids { - expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) - if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { - - } - } -} - -func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { - if r := CompareKernelVersion(a, b); r != result { - t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) - } -} - -// TestCompareKernelVersion tests the CompareKernelVersion() function -func TestCompareKernelVersion(t *testing.T) { - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 0) - assertKernelVersion(t, - VersionInfo{Kernel: 2, Major: 6, Minor: 0}, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - -1) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - VersionInfo{Kernel: 2, Major: 6, Minor: 0}, - 1) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 0) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 8, Minor: 5}, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 1) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 0, Minor: 20}, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - -1) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 7, Minor: 20}, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - -1) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 8, Minor: 20}, - VersionInfo{Kernel: 3, Major: 7, Minor: 0}, - 1) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 8, Minor: 20}, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 1) - assertKernelVersion(t, - VersionInfo{Kernel: 3, Major: 8, Minor: 0}, - VersionInfo{Kernel: 3, Major: 8, Minor: 20}, - -1) -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go deleted file mode 100644 index 85ca250c..00000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ /dev/null @@ -1,67 +0,0 @@ -package kernel - -import ( - "fmt" - "syscall" - "unsafe" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) - major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) - minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) - build int // Build number of the kernel version (e.g. 
6.1.7601.17592 -> 17592) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) -} - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - - var ( - h syscall.Handle - dwVersion uint32 - err error - ) - - KVI := &VersionInfo{"Unknown", 0, 0, 0} - - if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, - syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - syscall.KEY_READ, - &h); err != nil { - return KVI, err - } - defer syscall.RegCloseKey(h) - - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err = syscall.RegQueryValueEx(h, - syscall.StringToUTF16Ptr("BuildLabEx"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { - return KVI, err - } - - KVI.kvi = syscall.UTF16ToString(buf[:]) - - // Important - docker.exe MUST be manifested for this API to return - // the correct information. - if dwVersion, err = syscall.GetVersion(); err != nil { - return KVI, err - } - - KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) - KVI.build = int((dwVersion & 0xFFFF0000) >> 16) - - return KVI, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go new file mode 100644 index 00000000..0589cf2a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go @@ -0,0 +1,18 @@ +package operatingsystem + +import ( + "errors" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + // TODO: Implement OS detection + return "", errors.New("Cannot detect OS version") +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD, always returns false. +func IsContainerized() (bool, error) { + // TODO: Implement jail detection + return false, errors.New("Cannot detect if we are in container") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 00000000..e04a3499 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. 
+func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go index 453cccfb..acc89716 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers.go +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -5,158 +5,10 @@ package parsers import ( "fmt" - "net" - "net/url" - "path" - "runtime" "strconv" "strings" ) -// ParseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr -// defaultUnixAddr must be a absolute file path (no `unix://` prefix) -// defaultTCPAddr must be the full `tcp://host:port` form -func ParseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { - addr = strings.TrimSpace(addr) - if addr == "" { - if defaultAddr == defaultTLSHost { - return defaultTLSHost, nil - } - if runtime.GOOS != "windows" { - return fmt.Sprintf("unix://%s", defaultUnixAddr), nil - } - return defaultTCPAddr, nil - } - addrParts := strings.Split(addr, "://") - if len(addrParts) == 1 { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], defaultTCPAddr) - case "unix": - return ParseUnixAddr(addrParts[1], defaultUnixAddr) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// ParseUnixAddr parses and validates that the specified address is a valid UNIX -// socket address. It returns a formatted UNIX socket address, either using the -// address parsed from addr, or the contents of defaultAddr if addr is a blank -// string. 
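
An aside on the operatingsystem package added above: GetOperatingSystem returns the PRETTY_NAME value from /etc/os-release (falling back to /usr/lib/os-release on stateless systems), and IsContainerized reports whether PID 1's cgroup paths point anywhere other than the root or systemd's init.scope. A minimal caller, assuming the vendored import path (illustrative, not part of the diff):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/operatingsystem"
)

func main() {
	// e.g. "Ubuntu 16.04 LTS"; defaults to "Linux" when PRETTY_NAME is unset,
	// per the os-release spec.
	name, err := operatingsystem.GetOperatingSystem()
	if err != nil {
		panic(err)
	}

	// True when /proc/1/cgroup shows non-root cgroup paths.
	inContainer, err := operatingsystem.IsContainerized()
	if err != nil {
		panic(err)
	}

	fmt.Printf("os=%q containerized=%v\n", name, inContainer)
}
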
-func ParseUnixAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "unix://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("unix://%s", addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. - if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if net.ParseIP(host).To4() == nil && strings.Contains(host, ":") { - // This is either an ipv6 address - host = "[" + host + "]" - } - return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil -} - -// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest -// The tag can be confusing because of a port in a repository name. -// Ex: localhost.localdomain:5000/samalba/hipache:latest -// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb -func ParseRepositoryTag(repos string) (string, string) { - n := strings.Index(repos, "@") - if n >= 0 { - parts := strings.Split(repos, "@") - return parts[0], parts[1] - } - n = strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" -} - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - // ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) func ParseKeyValueOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) @@ -166,54 +18,6 @@ func ParseKeyValueOpt(opt string) (string, string, error) { return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get an HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - // ParseUintList parses and validates the specified string as the value // found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be // one of the formats below. 
Note that duplicates are actually allowed in the diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go deleted file mode 100644 index 47b45281..00000000 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go +++ /dev/null @@ -1,295 +0,0 @@ -package parsers - -import ( - "reflect" - "runtime" - "strings" - "testing" -) - -func TestParseDockerDaemonHost(t *testing.T) { - var ( - defaultHTTPHost = "tcp://localhost:2375" - defaultHTTPSHost = "tcp://localhost:2376" - defaultUnix = "/var/run/docker.sock" - defaultHOST = "unix:///var/run/docker.sock" - ) - if runtime.GOOS == "windows" { - defaultHOST = defaultHTTPHost - } - invalids := map[string]string{ - "0.0.0.0": "Invalid bind address format: 0.0.0.0", - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", - "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", - "tcp": "Invalid bind address format: tcp", - "unix": "Invalid bind address format: unix", - "fd": "Invalid bind address format: fd", - } - valids := map[string]string{ - "0.0.0.1:": "tcp://0.0.0.1:2375", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - "[::1]:": "tcp://[::1]:2375", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - ":6666": "tcp://localhost:6666", - ":6666/path": "tcp://localhost:6666/path", - "": defaultHOST, - " ": defaultHOST, - " ": defaultHOST, - "tcp://": defaultHTTPHost, - "tcp://:7777": "tcp://localhost:7777", - "tcp://:7777/path": "tcp://localhost:7777/path", - " tcp://:7777/path ": "tcp://localhost:7777/path", - "unix:///run/docker.sock": "unix:///run/docker.sock", - "unix://": "unix:///var/run/docker.sock", - "fd://": "fd://", - "fd://something": "fd://something", - "localhost:": "tcp://localhost:2375", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", invalidAddr); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", validAddr); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseTCP(t *testing.T) { - var ( - defaultHTTPHost = "tcp://127.0.0.1:2376" - ) - invalids := map[string]string{ - "0.0.0.0": "Invalid bind address format: 0.0.0.0", - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", - } - valids := map[string]string{ - "": defaultHTTPHost, - "tcp://": defaultHTTPHost, - "0.0.0.1:": "tcp://0.0.0.1:2376", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - 
":6666": "tcp://127.0.0.1:6666", - ":6666/path": "tcp://127.0.0.1:6666/path", - "tcp://:7777": "tcp://127.0.0.1:7777", - "tcp://:7777/path": "tcp://127.0.0.1:7777/path", - "[::1]:": "tcp://[::1]:2376", - "[::1]:5555": "tcp://[::1]:5555", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", - "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - "localhost:": "tcp://localhost:2376", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseInvalidUnixAddrInvalid(t *testing.T) { - if _, err := ParseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if v, err := ParseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { - t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") - } -} - -func TestParseRepositoryTag(t *testing.T) { - if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", 
"url:5000/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } -} - -func TestParseKeyValueOpt(t *testing.T) { - invalids := map[string]string{ - "": "Unable to parse key/value option: ", - "key": "Unable to parse key/value option: key", - } - for invalid, expectedError := range invalids { - if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) - } - } - valids := map[string][]string{ - "key=value": {"key", "value"}, - " key = value ": {"key", "value"}, - "key=value1=value2": {"key", "value1=value2"}, - " key = value1 = value2 ": {"key", "value1 = value2"}, - } - for valid, expectedKeyValue := range valids { - key, value, err := ParseKeyValueOpt(valid) - if err != nil { - t.Fatal(err) - } - if key != expectedKeyValue[0] || value != expectedKeyValue[1] { - t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) - } - } -} - -func TestParsePortRange(t *testing.T) { - if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { - t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) - } -} - -func TestParsePortRangeEmpty(t *testing.T) { - if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." 
{ - t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) - } -} - -func TestParsePortRangeWithNoRange(t *testing.T) { - start, end, err := ParsePortRange("8080") - if err != nil { - t.Fatal(err) - } - if start != 8080 || end != 8080 { - t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) - } -} - -func TestParsePortRangeIncorrectRange(t *testing.T) { - if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectEndRange(t *testing.T) { - if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectStartRange(t *testing.T) { - if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParseLink(t *testing.T) { - name, alias, err := ParseLink("name:alias") - if err != nil { - t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "alias" { - t.Fatalf("Link alias should have been alias, got %s instead", alias) - } - // short format definition - name, alias, err = ParseLink("name") - if err != nil { - t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "name" { - t.Fatalf("Link alias should have been name, got %s instead", alias) - } - // empty string link definition is not allowed - if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { - t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) - } - // more than two colons are not allowed - if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { - t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) - } -} - -func TestParseUintList(t *testing.T) { - valids := map[string]map[int]bool{ - "": {}, - "7": {7: true}, - "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, - "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, - "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, - "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, - "03,1-3": {1: true, 2: true, 3: true}, - "3,2,1": {1: true, 2: true, 3: true}, - "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, - } - for k, v := range valids { - out, err := ParseUintList(k) - if err != nil { - t.Fatalf("Expected not to fail, got %v", err) - } - if 
!reflect.DeepEqual(out, v) { - t.Fatalf("Expected %v, got %v", v, out) - } - } - - invalids := []string{ - "this", - "1--", - "1-10,,10", - "10-1", - "-1", - "-1,0", - } - for _, v := range invalids { - if out, err := ParseUintList(v); err == nil { - t.Fatalf("Expected failure with %s but got %v", v, out) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go new file mode 100644 index 00000000..58cc4017 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go @@ -0,0 +1,50 @@ +// Package pidfile provides structure and helper functions to create and remove +// a PID file. A PID file is usually a file used to store the process ID of a +// running process. +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" +) + +// PIDFile is a file used to store the process ID of a running process. +type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDFile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +} + +// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go b/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go new file mode 100644 index 00000000..992987e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go @@ -0,0 +1,15 @@ +package platform + +import ( + "github.com/docker/containerd/subreaper/exec" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return string(machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go new file mode 100644 index 00000000..30c58c78 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper functions to get the runtime architecture +// for different platforms.
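// Editorial note (not part of the vendored diff): the pidfile package above is
// typically used to stop a daemon from starting twice. A minimal sketch, with a
// hypothetical path:
//
//	pf, err := pidfile.New("/var/run/example.pid")
//	if err != nil {
//		log.Fatal(err) // another instance still appears to be running
//	}
//	defer pf.Remove()
//
// New only fails while the recorded PID still resolves under /proc, so a stale
// file left behind by a crashed process is overwritten rather than reported.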
+package platform + +import ( + "syscall" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &syscall.Utsname{} + if err := syscall.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go new file mode 100644 index 00000000..59e25295 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/platform.go @@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process. + OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go new file mode 100644 index 00000000..5dcbadfd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 +// see golang's sources src/syscall/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go new file mode 100644 index 00000000..c9875cf6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x +// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 00000000..e3fd326e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,186 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTimeOut = 30 +) + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) { + tr := &http.Transport{} + + c, err := tlsconfig.Client(tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty.
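// Editorial note (not part of the vendored diff): url.Parse leaves Host empty
// for local socket addresses, which is what this branch relies on, e.g.:
//
//	u, _ := url.Parse("unix:///run/docker/plugins/example.sock")
//	// u.Scheme == "unix", u.Host == "", u.Path == "/run/docker/plugins/example.sock"
//
// so for unix:// (and similar) addresses the socket path is taken from u.Path.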
+ socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + clientTransport := transport.NewHTTPTransport(tr, scheme, socket) + return NewClientWithTransport(clientTransport), nil +} + +// NewClientWithTransport creates a new plugin client with a given transport. +func NewClientWithTransport(tr transport.Transport) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. +func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + var retries int + start := time.Now() + + for { + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s:%s, retrying in %v", req.URL.Host, req.URL.Path, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
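// Editorial note (not part of the vendored diff): a plugin that answers with a
// JSON body such as {"Err": "volume not found"} surfaces through statusError as
// "<serviceMethod>: volume not found"; any body that does not unmarshal that
// way falls through to the raw-string fallback below ("old way").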
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 00000000..9dc64194 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,132 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" + specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return newLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return newLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.Name = name + if len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 00000000..79884710 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go new file mode 100644 index 00000000..4f270a40 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -0,0 +1,257 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is sent at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of Docker subsystems which this plugin implements. +// +// In order to use a plugin, you can use ``Get`` with the name of the +// plugin and the subsystem it implements.
+// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins + +import ( + "errors" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + extpointHandlers = make(map[string]func(string, *Client)) +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystem the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. +type Plugin struct { + // Name of the plugin + Name string `json:"-"` + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig tlsconfig.Options + // Client attached to the plugin + Client *Client `json:"-"` + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // error produced by activation + activateErr error + // specifies if the activation sequence is completed (not whether it was successful) + activated bool + // wait for activation to finish + activateWait *sync.Cond +} + +func newLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + Name: name, + Addr: addr, + TLSConfig: tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + if p.activated { + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + p.activated = true + + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.Client = c + + m := new(Manifest) + if err = p.Client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + + for _, iface := range m.Implements { + handler, handled := extpointHandlers[iface] + if !handled { + continue + } + handler(p.Name, p.Client) + } + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if err := p.waitActive(); err != nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok :=
storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. +func Handle(iface string, fn func(string, *Client)) { + extpointHandlers[iface] = fn +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + if pl, ok := storage.plugins[name]; ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 00000000..5be146af --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 00000000..d7f1e210 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. 
+func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/docker/docker/pkg/pools/pools_test.go deleted file mode 100644 index 78689800..00000000 --- a/vendor/github.com/docker/docker/pkg/pools/pools_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package pools - -import ( - "bufio" - "bytes" - "io" - "strings" - "testing" -) - -func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - reader := BufioReader32KPool.Get(nil) - if reader == nil { - t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") - } -} - -func TestBufioReaderPoolPutAndGet(t *testing.T) { - sr := bufio.NewReader(strings.NewReader("foobar")) - reader := BufioReader32KPool.Get(sr) - if reader == nil { - t.Fatalf("BufioReaderPool should not return a nil reader.") - } - // verify the first 3 byte - buf1 := make([]byte, 3) - _, err := reader.Read(buf1) - if err != nil { - t.Fatal(err) - } - if actual := string(buf1); actual != "foo" { - t.Fatalf("The first letter should have been 'foo' but was %v", actual) - } - BufioReader32KPool.Put(reader) - // Try to read the next 3 bytes - _, err = sr.Read(make([]byte, 3)) - if err == nil || err != io.EOF { - t.Fatalf("The buffer should have been empty, issue an EOF error.") - } -} - -type simpleReaderCloser struct { - io.Reader - closed bool -} - -func (r *simpleReaderCloser) Close() error { - r.closed = true - return nil -} - -func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { - br := bufio.NewReader(strings.NewReader("")) - sr := &simpleReaderCloser{ - Reader: strings.NewReader("foobar"), - closed: false, - } - reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) - if reader == nil { - t.Fatalf("NewReadCloserWrapper should not return a nil reader.") - } - // Verify the content of reader - buf := make([]byte, 3) - _, err := reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "foo" { - t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) - } - reader.Close() - // Read 3 more bytes "bar" - _, err = reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "bar" { - t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) - } - if !sr.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} - -func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - writer := BufioWriter32KPool.Get(nil) - if writer == nil { - t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") - } -} - -func TestBufioWriterPoolPutAndGet(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - writer := BufioWriter32KPool.Get(bw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - // Make sure we Flush all the way ? 
- writer.Flush() - bw.Flush() - if len(buf.Bytes()) != 6 { - t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) - } - // Reset the buffer - buf.Reset() - BufioWriter32KPool.Put(writer) - // Try to write something - written, err = writer.Write([]byte("barfoo")) - if err != nil { - t.Fatal(err) - } - // If we now try to flush it, it should panic (the writer is nil) - // recover it - defer func() { - if r := recover(); r == nil { - t.Fatal("Trying to flush the writter should have 'paniced', did not.") - } - }() - writer.Flush() -} - -type simpleWriterCloser struct { - io.Writer - closed bool -} - -func (r *simpleWriterCloser) Close() error { - r.closed = true - return nil -} - -func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - sw := &simpleWriterCloser{ - Writer: new(bytes.Buffer), - closed: false, - } - bw.Flush() - writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - writer.Close() - if !sw.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go new file mode 100644 index 00000000..61315cb8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -0,0 +1,73 @@ +package progress + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... + Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // Aux contains extra information not presented to the user, such as + // digests for push signing. + Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + out <- p + return nil +} + +// ChanOutput returns a Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. 
+func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 00000000..c39e2b69 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,59 @@ +package progress + +import ( + "io" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. +func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) +} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go new file mode 100644 index 00000000..09364617 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -0,0 +1,111 @@ +package pubsub + +import ( + "sync" + "time" +) + +var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} + +// NewPublisher creates a new pub/sub publisher to broadcast messages. +// The duration is used as the send timeout as to not block the publisher publishing +// messages to other clients if one client is slow or unresponsive. +// The buffer is used when creating new channels for subscribers. +func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { + return &Publisher{ + buffer: buffer, + timeout: publishTimeout, + subscribers: make(map[subscriber]topicFunc), + } +} + +type subscriber chan interface{} +type topicFunc func(v interface{}) bool + +// Publisher is basic pub/sub structure. Allows to send events and subscribe +// to them. Can be safely used from multiple goroutines. +type Publisher struct { + m sync.RWMutex + buffer int + timeout time.Duration + subscribers map[subscriber]topicFunc +} + +// Len returns the number of subscribers for the publisher +func (p *Publisher) Len() int { + p.m.RLock() + i := len(p.subscribers) + p.m.RUnlock() + return i +} + +// Subscribe adds a new subscriber to the publisher returning the channel. 
+func (p *Publisher) Subscribe() chan interface{} { + return p.SubscribeTopic(nil) +} + +// SubscribeTopic adds a new subscriber that filters messages sent by a topic. +func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. +func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select as to not block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +} diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go index e560aff1..70de4d13 100644 --- a/vendor/github.com/docker/docker/pkg/random/random.go +++ b/vendor/github.com/docker/docker/pkg/random/random.go @@ -10,7 +10,7 @@ import ( "time" ) -// Rand is a global *rand.Rand instance, which initilized with NewSource() source. +// Rand is a global *rand.Rand instance, which initialized with NewSource() source. var Rand = rand.New(NewSource()) // Reader is a global, shared instance of a pseudorandom bytes generator. diff --git a/vendor/github.com/docker/docker/pkg/random/random_test.go b/vendor/github.com/docker/docker/pkg/random/random_test.go deleted file mode 100644 index cf405f78..00000000 --- a/vendor/github.com/docker/docker/pkg/random/random_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package random - -import ( - "math/rand" - "sync" - "testing" -) - -// for go test -v -race -func TestConcurrency(t *testing.T) { - rnd := rand.New(NewSource()) - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - rnd.Int63() - wg.Done() - }() - } - wg.Wait() -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go b/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go index c7f797a5..a540810a 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go @@ -3,7 +3,7 @@ package reexec import ( - "os/exec" + "github.com/docker/containerd/subreaper/exec" ) // Self returns the path to the current process's binary. 
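A short usage sketch for the pubsub Publisher vendored above (editorial illustration, not part of this diff; the timeout and buffer values are arbitrary):

	package main

	import (
		"fmt"
		"time"

		"github.com/docker/docker/pkg/pubsub"
	)

	func main() {
		// 100ms per-subscriber send timeout, channel buffer of 10.
		p := pubsub.NewPublisher(100*time.Millisecond, 10)
		sub := p.Subscribe()
		defer p.Evict(sub)

		p.Publish("hello") // delivered to every subscriber's buffered channel
		fmt.Println(<-sub) // prints "hello"
	}

Because Publish waits on a goroutine per subscriber, a saturated channel only delays it by the configured timeout rather than blocking it indefinitely.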
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go index 3c3a73a9..5f9d1394 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -3,7 +3,8 @@ package reexec import ( - "os/exec" + osExec "os/exec" + "github.com/docker/containerd/subreaper/exec" "syscall" ) @@ -19,10 +20,12 @@ func Self() string { // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGTERM, + Cmd: osExec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, }, } } diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go index ad4ea38e..7f1a99cf 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -3,7 +3,7 @@ package reexec import ( - "os/exec" + "github.com/docker/containerd/subreaper/exec" ) // Command is unsupported on operating systems apart from Linux and Windows. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go deleted file mode 100644 index 8d65e0ae..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which have Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go index 20491e05..78ecce63 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec.go +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -3,8 +3,10 @@ package reexec import ( "fmt" "os" - "os/exec" + "path" "path/filepath" + + "github.com/docker/containerd/subreaper/exec" ) var registeredInitializers = make(map[string]func()) @@ -22,6 +24,9 @@ func Register(name string, initializer func()) { // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] + if !exists { + initializer, exists = registeredInitializers[path.Base(os.Args[0])] + } if exists { initializer() @@ -41,7 +46,7 @@ func naiveSelf() string { if absName, err := filepath.Abs(name); err == nil { return absName } - // if we coudn't get absolute name, return original + // if we couldn't get absolute name, return original // (NOTE: Go only errors on Abs() if os.Getwd fails) return name } diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar.go b/vendor/github.com/docker/docker/pkg/registrar/registrar.go new file mode 100644 index 00000000..8910197f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/registrar/registrar.go @@ -0,0 +1,127 @@ +// Package registrar provides name registration. It reserves a name to a given key. 
+package registrar + +import ( + "errors" + "sync" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") + // ErrNoSuchKey is returned when trying to find the names for a key which is not known + ErrNoSuchKey = errors.New("provided key does not exist") +) + +// Registrar indexes a list of keys and their registered names, as well as each name and the key it is registered to. +// Names must be unique. +// Registrar is safe for concurrent access. +type Registrar struct { + idx map[string][]string + names map[string]string + mu sync.Mutex +} + +// NewRegistrar creates a new Registrar with an empty index +func NewRegistrar() *Registrar { + return &Registrar{ + idx: make(map[string][]string), + names: make(map[string]string), + } +} + +// Reserve registers a key to a name +// Reserve is idempotent +// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (r *Registrar) Reserve(name, key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if k, exists := r.names[name]; exists { + if k != key { + return ErrNameReserved + } + return nil + } + + r.idx[key] = append(r.idx[key], name) + r.names[name] = key + return nil +} + +// Release releases the reserved name +// Once released, a name can be reserved again +func (r *Registrar) Release(name string) { + r.mu.Lock() + defer r.mu.Unlock() + + key, exists := r.names[name] + if !exists { + return + } + + for i, n := range r.idx[key] { + if n != name { + continue + } + r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) + break + } + + delete(r.names, name) + + if len(r.idx[key]) == 0 { + delete(r.idx, key) + } +} + +// Delete removes all reservations for the passed in key. +// All names reserved to this key are released. +func (r *Registrar) Delete(key string) { + r.mu.Lock() + for _, name := range r.idx[key] { + delete(r.names, name) + } + delete(r.idx, key) + r.mu.Unlock() +} + +// GetNames lists all the reserved names for the given key +func (r *Registrar) GetNames(key string) ([]string, error) { + r.mu.Lock() + defer r.mu.Unlock() + + names, exists := r.idx[key] + if !exists { + return nil, ErrNoSuchKey + } + return names, nil +} + +// Get returns the key that the passed in name is reserved to +func (r *Registrar) Get(name string) (string, error) { + r.mu.Lock() + key, exists := r.names[name] + r.mu.Unlock() + + if !exists { + return "", ErrNameNotReserved + } + return key, nil +} + +// GetAll returns all registered names +func (r *Registrar) GetAll() map[string][]string { + out := make(map[string][]string) + + r.mu.Lock() + // copy index into out + for id, names := range r.idx { + out[id] = names + } + r.mu.Unlock() + return out +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go index 1ecc3294..d418cbe9 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go @@ -4,41 +4,77 @@ import ( "syscall" ) +const ( + sigrtmin = 34 + sigrtmax = 64 +) + // SignalMap is a map of Linux signals.
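// Editorial note (not part of the vendored file): sigrtmin is 34 rather than
// the kernel's 32 because glibc reserves the first two real-time signals for
// its own use. With these constants, "RTMIN+2" below resolves to signal 36 and
// "RTMAX-1" to signal 63.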
var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CLD": syscall.SIGCLD, - "CONT": syscall.SIGCONT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "POLL": syscall.SIGPOLL, - "PROF": syscall.SIGPROF, - "PWR": syscall.SIGPWR, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STKFLT": syscall.SIGSTKFLT, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "UNUSED": syscall.SIGUNUSED, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CLD": syscall.SIGCLD, + "CONT": syscall.SIGCONT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "POLL": syscall.SIGPOLL, + "PROF": syscall.SIGPROF, + "PWR": syscall.SIGPWR, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STKFLT": syscall.SIGSTKFLT, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "UNUSED": syscall.SIGUNUSED, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, } diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go index d4fea931..6621d371 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go @@ -14,6 +14,8 @@ const ( SIGCHLD = syscall.SIGCHLD // SIGWINCH is a signal sent to a process when its controlling terminal changes its size SIGWINCH = syscall.SIGWINCH + // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading + SIGPIPE = syscall.SIGPIPE // DefaultStopSignal is the syscall signal used to stop a container in unix systems. 
DefaultStopSignal = "SIGTERM" ) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go deleted file mode 100644 index c80a951c..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build windows - -package signal - -import ( - "syscall" -) - -// Signals used in api/client (no windows equivalent, use -// invalid signals so they don't get handled) -const ( - SIGCHLD = syscall.Signal(0xff) - SIGWINCH = syscall.Signal(0xff) - // DefaultStopSignal is the syscall signal used to stop a container in windows systems. - DefaultStopSignal = "15" -) - -// SignalMap is a map of "supported" signals. As per the comment in GOLang's -// ztypes_windows.go: "More invented values for signals". Windows doesn't -// really support signals in any way, shape or form that Unix does. -// -// We have these so that docker kill can be used to gracefully (TERM) and -// forcibly (KILL) terminate a container on Windows. -var SignalMap = map[string]syscall.Signal{ - "KILL": syscall.SIGKILL, - "TERM": syscall.SIGTERM, -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go similarity index 77% rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go rename to vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index b2c60046..b37ae39f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -3,12 +3,24 @@ package stdcopy import ( "encoding/binary" "errors" + "fmt" "io" - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus" ) +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error stream type. + Stderr + stdWriterPrefixLen = 8 stdWriterFdIndex = 0 stdWriterSizeIndex = 4 @@ -16,38 +28,32 @@ const ( startingBufLen = 32*1024 + stdWriterPrefixLen + 1 ) -// StdType prefixes type and length to standard stream. -type StdType [stdWriterPrefixLen]byte - -var ( - // Stdin represents standard input stream type. - Stdin = StdType{0: 0} - // Stdout represents standard output stream type. - Stdout = StdType{0: 1} - // Stderr represents standard error steam type. - Stderr = StdType{0: 2} -) - -// StdWriter is wrapper of io.Writer with extra customized info. -type StdWriter struct { +// stdWriter is a wrapper of io.Writer with extra customized info. +type stdWriter struct { io.Writer - prefix StdType - sizeBuf []byte + prefix byte } -func (w *StdWriter) Write(buf []byte) (n int, err error) { - var n1, n2 int +// Write sends the buffer to the underlying writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to multiplex the output. +// It makes stdWriter implement io.Writer.
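// Editorial note (not part of the vendored diff): each frame Write produces is
// an 8-byte header followed by the payload. For prefix Stdout (1) and payload
// "hi", the bytes on the wire are:
//
//	[0x01 0x00 0x00 0x00 0x00 0x00 0x00 0x02 'h' 'i']
//
// byte 0 carries the stream id and bytes 4-7 the big-endian payload length,
// which is exactly what StdCopy reads back when demultiplexing.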
+func (w *stdWriter) Write(buf []byte) (n int, err error) { if w == nil || w.Writer == nil { return 0, errors.New("Writer not instantiated") } - binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) - n1, err = w.Writer.Write(w.prefix[:]) - if err != nil { - n = n1 - stdWriterPrefixLen - } else { - n2, err = w.Writer.Write(buf) - n = n1 + n2 - stdWriterPrefixLen + if buf == nil { + return 0, nil } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf))) + + line := append(header[:], buf...) + + n, err = w.Writer.Write(line) + n -= stdWriterPrefixLen + if n < 0 { n = 0 } @@ -60,16 +66,13 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) { // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. // `t` indicates the id of the stream to encapsulate. // It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) *StdWriter { - return &StdWriter{ - Writer: w, - prefix: t, - sizeBuf: make([]byte, 4), +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), } } -var errInvalidStdHeader = errors.New("Unrecognized input header") - // StdCopy is a modified version of io.Copy. // // StdCopy will demultiplex `src`, assuming that it contains two streams, @@ -110,18 +113,18 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) } // Check the first byte to know where to write - switch buf[stdWriterFdIndex] { - case 0: + switch StdType(buf[stdWriterFdIndex]) { + case Stdin: fallthrough - case 1: + case Stdout: // Write on stdout out = dstout - case 2: + case Stderr: // Write on stderr out = dsterr default: logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) - return 0, errInvalidStdHeader + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) } // Retrieve the size of the frame diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go new file mode 100644 index 00000000..ce6ea79d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -0,0 +1,172 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +// StreamFormatter formats a stream, optionally using JSON. +type StreamFormatter struct { + json bool +} + +// NewStreamFormatter returns a simple StreamFormatter +func NewStreamFormatter() *StreamFormatter { + return &StreamFormatter{} +} + +// NewJSONStreamFormatter returns a StreamFormatter configured to stream json +func NewJSONStreamFormatter() *StreamFormatter { + return &StreamFormatter{true} +} + +const streamNewline = "\r\n" + +var streamNewlineBytes = []byte(streamNewline) + +// FormatStream formats the specified stream. +func (sf *StreamFormatter) FormatStream(str string) []byte { + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + "\r") +} + +// FormatStatus formats the specified objects according to the specified format (and id). 
+func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + streamNewline) +} + +// FormatError formats the specified error. +func (sf *StreamFormatter) FormatError(err error) []byte { + if sf.json { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return append(b, streamNewlineBytes...) + } + return []byte("{\"error\":\"format error\"}" + streamNewline) + } + return []byte("Error: " + err.Error() + streamNewline) +} + +// FormatProgress formats the progress information for a specified action. +func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + if sf.json { + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return nil + } + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return append(b, streamNewlineBytes...) + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +// NewProgressOutput returns a progress.Output object that can be passed to +// progress.NewProgressReader. +func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{ + sf: sf, + out: out, + newLines: newLines, + } +} + +type progressOutput struct { + sf *StreamFormatter + out io.Writer + newLines bool +} + +// WriteProgress formats progress information from a ProgressReader. +func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.FormatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} + formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.FormatStatus("", "")) + return err + } + + return nil +} + +// StdoutFormatter is a streamFormatter that writes to the standard output. +type StdoutFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +// StderrFormatter is a streamFormatter that writes to the standard error. 
+type StderrFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go index 266a74ba..02d2594e 100644 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -7,6 +7,7 @@ import ( "io" "regexp" "strconv" + "strings" "github.com/docker/docker/pkg/random" ) @@ -25,6 +26,9 @@ func IsShortID(id string) bool { // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller // will need to use a langer prefix, or the full-length Id. func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } trimTo := shortLen if len(id) < shortLen { trimTo = len(id) @@ -44,7 +48,7 @@ func generateID(crypto bool) string { } id := hex.EncodeToString(b) // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numberic and causes issues when + // an error then the value is all numeric and causes issues when // used as a hostname. ref #3869 if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { continue diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go deleted file mode 100644 index bcb13654..00000000 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package stringid - -import ( - "strings" - "testing" -) - -func TestGenerateRandomID(t *testing.T) { - id := GenerateRandomID() - - if len(id) != 64 { - t.Fatalf("Id returned is incorrect: %s", id) - } -} - -func TestShortenId(t *testing.T) { - id := GenerateRandomID() - truncID := TruncateID(id) - if len(truncID) != 12 { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestShortenIdEmpty(t *testing.T) { - id := "" - truncID := TruncateID(id) - if len(truncID) > len(id) { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestShortenIdInvalid(t *testing.T) { - id := "1234" - truncID := TruncateID(id) - if len(truncID) != len(id) { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestIsShortIDNonHex(t *testing.T) { - id := "some non-hex value" - if IsShortID(id) { - t.Fatalf("%s is not a short ID", id) - } -} - -func TestIsShortIDNotCorrectSize(t *testing.T) { - id := strings.Repeat("a", shortLen+1) - if IsShortID(id) { - t.Fatalf("%s is not a short ID", id) - } - id = strings.Repeat("a", shortLen-1) - if IsShortID(id) { - t.Fatalf("%s is not a short ID", id) - } -} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go deleted file mode 100644 index fec59450..00000000 --- a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package stringutils - -import "testing" - -func testLengthHelper(generator func(int) string, t *testing.T) { - expectedLength := 20 - s := generator(expectedLength) - if len(s) != expectedLength { - t.Fatalf("Length of %s was %d but expected length 
%d", s, len(s), expectedLength) - } -} - -func testUniquenessHelper(generator func(int) string, t *testing.T) { - repeats := 25 - set := make(map[string]struct{}, repeats) - for i := 0; i < repeats; i = i + 1 { - str := generator(64) - if len(str) != 64 { - t.Fatalf("Id returned is incorrect: %s", str) - } - if _, ok := set[str]; ok { - t.Fatalf("Random number is repeated") - } - set[str] = struct{}{} - } -} - -func isASCII(s string) bool { - for _, c := range s { - if c > 127 { - return false - } - } - return true -} - -func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { - testLengthHelper(GenerateRandomAlphaOnlyString, t) -} - -func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { - testUniquenessHelper(GenerateRandomAlphaOnlyString, t) -} - -func TestGenerateRandomAsciiStringLength(t *testing.T) { - testLengthHelper(GenerateRandomASCIIString, t) -} - -func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { - testUniquenessHelper(GenerateRandomASCIIString, t) -} - -func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { - str := GenerateRandomASCIIString(64) - if !isASCII(str) { - t.Fatalf("%s contained non-ascii characters", str) - } -} - -func TestTruncate(t *testing.T) { - str := "teststring" - newstr := Truncate(str, 4) - if newstr != "test" { - t.Fatalf("Expected test, got %s", newstr) - } - newstr = Truncate(str, 20) - if newstr != "teststring" { - t.Fatalf("Expected teststring, got %s", newstr) - } -} - -func TestInSlice(t *testing.T) { - slice := []string{"test", "in", "slice"} - - test := InSlice(slice, "test") - if !test { - t.Fatalf("Expected string test to be in slice") - } - test = InSlice(slice, "SLICE") - if !test { - t.Fatalf("Expected string SLICE to be in slice") - } - test = InSlice(slice, "notinslice") - if test { - t.Fatalf("Expected string notinslice not to be in slice") - } -} - -func TestShellQuoteArgumentsEmpty(t *testing.T) { - actual := ShellQuoteArguments([]string{}) - expected := "" - if actual != expected { - t.Fatalf("Expected an empty string") - } -} - -func TestShellQuoteArguments(t *testing.T) { - simpleString := "simpleString" - complexString := "This is a 'more' complex $tring with some special char *" - actual := ShellQuoteArguments([]string{simpleString, complexString}) - expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" - if actual != expected { - t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) - } -} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/strslice.go b/vendor/github.com/docker/docker/pkg/stringutils/strslice.go deleted file mode 100644 index 40557540..00000000 --- a/vendor/github.com/docker/docker/pkg/stringutils/strslice.go +++ /dev/null @@ -1,71 +0,0 @@ -package stringutils - -import ( - "encoding/json" - "strings" -) - -// StrSlice representes a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice struct { - parts []string -} - -// MarshalJSON Marshals (or serializes) the StrSlice into the json format. -// This method is needed to implement json.Marshaller. -func (e *StrSlice) MarshalJSON() ([]byte, error) { - if e == nil { - return []byte{}, nil - } - return json.Marshal(e.Slice()) -} - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of strings. -// This method is needed to implement json.Unmarshaler. 
-func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - e.parts = p - return nil -} - -// Len returns the number of parts of the StrSlice. -func (e *StrSlice) Len() int { - if e == nil { - return 0 - } - return len(e.parts) -} - -// Slice gets the parts of the StrSlice as a Slice of string. -func (e *StrSlice) Slice() []string { - if e == nil { - return nil - } - return e.parts -} - -// ToString gets space separated string of all the parts. -func (e *StrSlice) ToString() string { - s := e.Slice() - if s == nil { - return "" - } - return strings.Join(s, " ") -} - -// NewStrSlice creates an StrSlice based on the specified parts (as strings). -func NewStrSlice(parts ...string) *StrSlice { - return &StrSlice{parts} -} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/strslice_test.go b/vendor/github.com/docker/docker/pkg/stringutils/strslice_test.go deleted file mode 100644 index a587784e..00000000 --- a/vendor/github.com/docker/docker/pkg/stringutils/strslice_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package stringutils - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestStrSliceMarshalJSON(t *testing.T) { - strss := map[*StrSlice]string{ - nil: "", - &StrSlice{}: "null", - &StrSlice{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`, - } - - for strs, expected := range strss { - data, err := strs.MarshalJSON() - if err != nil { - t.Fatal(err) - } - if string(data) != expected { - t.Fatalf("Expected %v, got %v", expected, string(data)) - } - } -} - -func TestStrSliceUnmarshalJSON(t *testing.T) { - parts := map[string][]string{ - "": {"default", "values"}, - "[]": {}, - `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, - } - for json, expectedParts := range parts { - strs := &StrSlice{ - []string{"default", "values"}, - } - if err := strs.UnmarshalJSON([]byte(json)); err != nil { - t.Fatal(err) - } - - actualParts := strs.Slice() - if len(actualParts) != len(expectedParts) { - t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) - } - for index, part := range actualParts { - if part != expectedParts[index] { - t.Fatalf("Expected %v, got %v", expectedParts, actualParts) - break - } - } - } -} - -func TestStrSliceUnmarshalString(t *testing.T) { - var e *StrSlice - echo, err := json.Marshal("echo") - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - slice := e.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) - } - - if slice[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", slice[0]) - } -} - -func TestStrSliceUnmarshalSlice(t *testing.T) { - var e *StrSlice - echo, err := json.Marshal([]string{"echo"}) - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - slice := e.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) - } - - if slice[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", slice[0]) - } -} - -func TestStrSliceToString(t *testing.T) { - slices := map[*StrSlice]string{ - NewStrSlice(""): "", - NewStrSlice("one"): "one", - NewStrSlice("one", "two"): "one two", - } - for s, expected := range slices { - toString := s.ToString() - if toString != expected { - t.Fatalf("Expected %v, got %v", 
expected, toString) - } - } -} - -func TestStrSliceLen(t *testing.T) { - var emptyStrSlice *StrSlice - slices := map[*StrSlice]int{ - NewStrSlice(""): 1, - NewStrSlice("one"): 1, - NewStrSlice("one", "two"): 2, - emptyStrSlice: 0, - } - for s, expected := range slices { - if s.Len() != expected { - t.Fatalf("Expected %d, got %d", s.Len(), expected) - } - } -} - -func TestStrSliceSlice(t *testing.T) { - var emptyStrSlice *StrSlice - slices := map[*StrSlice][]string{ - NewStrSlice("one"): {"one"}, - NewStrSlice("one", "two"): {"one", "two"}, - emptyStrSlice: nil, - } - for s, expected := range slices { - if !reflect.DeepEqual(s.Slice(), expected) { - t.Fatalf("Expected %v, got %v", s.Slice(), expected) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE index 9e4bd4db..34c4ea7c 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2014-2015 Docker, Inc. + Copyright 2014-2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD index ac74d8f0..9b4f4a29 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -1,4 +1,4 @@ -Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_test.go b/vendor/github.com/docker/docker/pkg/symlink/fs_test.go deleted file mode 100644 index 89209484..00000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_test.go +++ /dev/null @@ -1,402 +0,0 @@ -// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE - -package symlink - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -type dirOrLink struct { - path string - target string -} - -func makeFs(tmpdir string, fs []dirOrLink) error { - for _, s := range fs { - s.path = filepath.Join(tmpdir, s.path) - if s.target == "" { - os.MkdirAll(s.path, 0755) - continue - } - if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { - return err - } - if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { - return err - } - } - return nil -} - -func testSymlink(tmpdir, path, expected, scope string) error { - rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) - if err != nil { - return err - } - expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) - if err != nil { - return err - } - if expected != rewrite { - return fmt.Errorf("Expected %q got %q", expected, rewrite) - } - return nil -} - -func TestFollowSymlinkAbsolute(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { - t.Fatal(err) - } -} - -func 
TestFollowSymlinkRelativePath(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{ - {path: "linkdir", target: "realdir"}, - {path: "linkdir/foo/bar"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { - if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { - t.Fatal("expected an error") - } -} - -func TestFollowSymlinkLastLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { - t.Fatal(err) - } - // avoid letting allowing symlink e lead us to ../b - // normalize to the "testdata/fs/a" - if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { - t.Fatal(err) - } - // avoid letting symlink f lead us out of the "testdata" scope - // we don't normalize because symlink f is in scope and there is no - // information leak - if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { - t.Fatal(err) - } - // avoid letting symlink f lead us out of the "testdata/fs" scope - // we don't normalize because symlink f is in scope and there is no - // information leak - if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativeLinkChain(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // avoid letting symlink g (pointed at by symlink h) take out of scope - // TODO: we should probably normalize to scope here because ../[....]/root - // is out of scope and we leak information - if err := makeFs(tmpdir, []dirOrLink{ - {path: "testdata/fs/b/h", target: "../g"}, - {path: "testdata/fs/g", target: 
"../../../../../../../../../../../../root"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkBreakoutPath(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // avoid letting symlink -> ../directory/file escape from scope - // normalize to "testdata/fs/j" - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkToRoot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // make sure we don't allow escaping to / - // normalize to dir - if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "foo", "", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkSlashDotdot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - tmpdir = filepath.Join(tmpdir, "dir", "subdir") - - // make sure we don't allow escaping to / - // normalize to dir - if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "foo", "", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkDotdot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - tmpdir = filepath.Join(tmpdir, "dir", "subdir") - - // make sure we stay in scope without leaking information - // this also checks for escaping to / - // normalize to dir - if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "foo", "", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativePath2(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkScopeLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root2"}, - {path: "root", target: "root2"}, - {path: "root2/foo", target: "../bar"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRootScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - expected, err := filepath.EvalSymlinks(tmpdir) - if err != nil { - t.Fatal(err) - } - rewrite, err := FollowSymlinkInScope(tmpdir, "/") - if err != nil { - t.Fatal(err) - } - if rewrite != expected { - t.Fatalf("expected %q got %q", expected, rewrite) - } -} - -func TestFollowSymlinkEmpty(t *testing.T) 
{ - res, err := FollowSymlinkInScope("", "") - if err != nil { - t.Fatal(err) - } - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - if res != wd { - t.Fatalf("expected %q got %q", wd, res) - } -} - -func TestFollowSymlinkCircular(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { - t.Fatal("expected an error for foo -> foo") - } - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/bar", target: "baz"}, - {path: "root/baz", target: "../bak"}, - {path: "root/bak", target: "/bar"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { - t.Fatal("expected an error for bar -> baz -> bak -> bar") - } -} - -func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root2"}, - {path: "root", target: "root2"}, - {path: "root/a", target: "r/s"}, - {path: "root/r", target: "../root/t"}, - {path: "root/root/t/s/b", target: "/../u"}, - {path: "root/u/c", target: "."}, - {path: "root/u/x/y", target: "../v"}, - {path: "root/u/v", target: "/../w"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/slash", target: "/"}, - {path: "root/sym", target: "/idontexist/../slash"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/sym", target: "/foo/bar"}, - {path: "root/hello", target: "/sym/../baz"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go deleted file mode 100644 index 29bd4568..00000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go +++ /dev/null @@ -1,156 +0,0 @@ -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/longpath" -) - -func toShort(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetShortPathName says we can reuse buffer - n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - return syscall.UTF16ToString(b), nil -} - -func 
toLong(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - b = b[:n] - return syscall.UTF16ToString(b), nil -} - -func evalSymlinks(path string) (string, error) { - path, err := walkSymlinks(path) - if err != nil { - return "", err - } - - p, err := toShort(path) - if err != nil { - return "", err - } - p, err = toLong(p) - if err != nil { - return "", err - } - // syscall.GetLongPathName does not change the case of the drive letter, - // but the result of EvalSymlinks must be unique, so we have - // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). - // Make drive letter upper case. - if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { - p = string(p[0]+'A'-'a') + p[1:] - } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { - p = p[:3] + string(p[4]+'A'-'a') + p[5:] - } - return filepath.Clean(p), nil -} - -const utf8RuneSelf = 0x80 - -func walkSymlinks(path string) (string, error) { - const maxIter = 255 - originalPath := path - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("EvalSymlinks: too many links in " + originalPath) - } - - // A path beginnging with `\\?\` represents the root, so automatically - // skip that part and begin processing the next segment. - if strings.HasPrefix(path, longpath.Prefix) { - b.WriteString(longpath.Prefix) - path = path[4:] - continue - } - - // find next path component, p - var i = -1 - for j, c := range path { - if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { - i = j - break - } - } - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - if b.Len() == 0 { - // must be absolute path - b.WriteRune(filepath.Separator) - } - continue - } - - // If this is the first segment after the long path prefix, accept the - // current segment as a volume root or UNC share and move on to the next. - if b.String() == longpath.Prefix { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - fi, err := os.Lstat(b.String() + p) - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { - b.WriteRune(filepath.Separator) - } - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(b.String() + p) - if err != nil { - return "", err - } - if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - return filepath.Clean(b.String()), nil -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/vendor/github.com/docker/docker/pkg/sysinfo/README.md new file mode 100644 index 00000000..c1530cef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. 
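For orientation before the implementation hunks, here is a minimal sketch of how a consumer might use the sysinfo package being vendored in. The names it relies on (sysinfo.New, the AppArmor, Seccomp, and IPv4ForwardingDisabled fields, and the promoted IsCpusetCpusAvailable method) are taken from the hunks below; the wrapping program is hypothetical and not part of this diff, and a Linux host is assumed since the probing implementation is Linux-specific.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/sysinfo"
)

func main() {
	// New probes cgroup mountpoints, /proc and /sys to record which kernel
	// features are usable; quiet=false logs a warning per missing feature.
	info := sysinfo.New(false)

	fmt.Println("AppArmor supported:", info.AppArmor)
	fmt.Println("Seccomp supported:", info.Seccomp)
	fmt.Println("IPv4 forwarding disabled:", info.IPv4ForwardingDisabled)

	// cgroupCpusetInfo is embedded in SysInfo, so its methods are promoted;
	// the argument uses cpuset list syntax parsed by pkg/parsers.
	if ok, err := info.IsCpusetCpusAvailable("0-1"); err == nil {
		fmt.Println("cpus 0-1 usable:", ok)
	}
}
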
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go new file mode 100644 index 00000000..cbd00999 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -0,0 +1,128 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. +type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. 
+func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go new file mode 100644 index 00000000..22ae0d95 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go @@ -0,0 +1,7 @@ +package sysinfo + +// New returns an empty SysInfo for freebsd for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 00000000..41fb0d2b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,246 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +const ( + // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. + SeccompModeFilter = uintptr(2) +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts() + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. 
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. +func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warnf("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit.") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation.") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warnf("Your kernel does not support oom control.") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warnf("Your kernel does not support memory swappiness.") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warnf("Your kernel does not support kernel memory limit.") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. 
+func checkCgroupPids(quiet bool) cgroupPids { + _, err := cgroups.FindCgroupMountpoint("pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go index 31ed9ff1..7637f12e 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -2,14 +2,30 @@ package system import ( "os" + "syscall" "time" + "unsafe" ) +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixMinTime.Add((1<<33 - 1) * time.Second) + unixMaxTime := maxTime // If the modified time is prior to the Unix Epoch, or after the // end of Unix Time, os.Chtimes has undefined behavior @@ -27,5 +43,10 @@ func Chtimes(name string, atime time.Time, mtime time.Time) error { return err } + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + return nil } diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go deleted file mode 100644 index f65a4b80..00000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" -) - -// prepareTempFile creates a temporary file in a temporary directory. -func prepareTempFile(t *testing.T) (string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - return file, dir -} - -// TestChtimes tests Chtimes on a tempfile. 
Test only mTime, because aTime is OS dependent -func TestChtimes(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) - afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, f.ModTime()) - } - - // Test aTime after Unix max time and mTime set to Unix max time - Chtimes(file, afterUnixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, f.ModTime()) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixMaxTime, afterUnixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go new file mode 100644 index 00000000..09d58bcb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. 
+func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go deleted file mode 100644 index 6998bbef..00000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "os" - "syscall" - "testing" - "time" -) - -// TestChtimes tests Chtimes access time on a tempfile on Linux -func TestChtimesLinux(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) - afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat := f.Sys().(*syscall.Stat_t) - aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } - - // Test aTime after Unix max time and mTime set to Unix max time - Chtimes(file, afterUnixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixMaxTime, afterUnixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } -} diff --git 
a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go deleted file mode 100644 index f09c4028..00000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "syscall" - "testing" - "time" -) - -// TestChtimes tests Chtimes access time on a tempfile on Windows -func TestChtimesWindows(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) - afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } - - // Test aTime after Unix max time and mTime set to Unix max time - Chtimes(file, afterUnixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixMaxTime, afterUnixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go 
deleted file mode 100644 index 04e2de78..00000000 --- a/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. -func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 16823d55..00000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "syscall" -) - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, perm os.FileMode) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
- dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go deleted file mode 100644 index 062cf53b..00000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "os" - "testing" -) - -// TestLstat tests Lstat for existing and non existing files -func TestLstat(t *testing.T) { - file, invalid, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - statFile, err := Lstat(file) - if err != nil { - t.Fatal(err) - } - if statFile == nil { - t.Fatal("returned empty stat for existing file") - } - - statInvalid, err := Lstat(invalid) - if err == nil { - t.Fatal("did not return error for non-existing file") - } - if statInvalid != nil { - t.Fatal("returned non-nil stat for non-existing file") - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 49e87eb4..00000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. 
-func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index a07bb17c..385f1d5e 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -7,11 +7,11 @@ import ( "strconv" "strings" - "github.com/docker/docker/pkg/units" + "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. +// MemInfo type. func ReadMemInfo() (*MemInfo, error) { file, err := os.Open("/proc/meminfo") if err != nil { @@ -22,8 +22,7 @@ func ReadMemInfo() (*MemInfo, error) { } // parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given a io.Reader to the file. -// +// a MemInfo object given an io.Reader to the file. // Throws error if there are problems reading from the file func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo := &MemInfo{} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go deleted file mode 100644 index c8fec629..00000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "strings" - "testing" - - "github.com/docker/docker/pkg/units" -) - -// TestMemInfo tests parseMemInfo with a static meminfo string -func TestMemInfo(t *testing.T) { - const input = ` - MemTotal: 1 kB - MemFree: 2 kB - SwapTotal: 3 kB - SwapFree: 4 kB - Malformed1: - Malformed2: 1 - Malformed3: 2 MB - Malformed4: X kB - ` - meminfo, err := parseMemInfo(strings.NewReader(input)) - if err != nil { - t.Fatal(err) - } - if meminfo.MemTotal != 1*units.KiB { - t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) - } - if meminfo.MemFree != 2*units.KiB { - t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) - } - if meminfo.SwapTotal != 3*units.KiB { - t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) - } - if meminfo.SwapFree != 4*units.KiB { - t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index d4664259..00000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c02..00000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 00000000..1b6cc9cb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// DefaultPathEnv is a Unix-style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character. +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 00000000..3c3b71fb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,15 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go similarity index 81% rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go rename to vendor/github.com/docker/docker/pkg/system/stat_solaris.go index 381ea821..b01d08ac 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd +// +build solaris package system @@ -13,5 +13,5 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtim}, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go deleted file mode 100644 index dee8d30a..00000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "os" - "syscall" - "testing" -) - -// TestFromStatT tests fromStatT for a tempfile -func TestFromStatT(t *testing.T) { - file, _, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - stat := &syscall.Stat_t{} - err := syscall.Lstat(file, stat) - - s, err :=
fromStatT(stat) - if err != nil { - t.Fatal(err) - } - - if stat.Mode != s.Mode() { - t.Fatal("got invalid mode") - } - if stat.Uid != s.UID() { - t.Fatal("got invalid uid") - } - if stat.Gid != s.GID() { - t.Fatal("got invalid gid") - } - if stat.Rdev != s.Rdev() { - t.Fatal("got invalid rdev") - } - if stat.Mtim != s.Mtim() { - t.Fatal("got invalid mtim") - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go index 381ea821..f53e9de4 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd +// +build !linux,!windows,!freebsd,!solaris,!openbsd package system diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 39490c62..00000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. -func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go index f1497c58..3ae91284 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -9,3 +9,9 @@ import "syscall" func Unmount(dest string) error { return syscall.Unmount(dest, 0) } + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine as the only element of the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index 273aa234..00000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -package system - -import ( - "fmt" - "syscall" -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information.
-func GetOSVersion() (OSVersion, error) { - var err error - osv := OSVersion{} - osv.Version, err = syscall.GetVersion() - if err != nil { - return osv, fmt.Errorf("Failed to call GetVersion()") - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv, nil -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index 13f1de17..00000000 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go index 007bfa8c..fc8a1aba 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -5,7 +5,7 @@ import ( "unsafe" ) -// LUtimesNano is used to change access and modification time of the speficied path. +// LUtimesNano is used to change access and modification time of the specified path. // It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { // These are not currently available in syscall diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go deleted file mode 100644 index 1ee0d099..00000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -// prepareFiles creates files for testing in the temp directory -func prepareFiles(t *testing.T) (string, string, string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - - invalid := filepath.Join(dir, "doesnt-exist") - - symlink := filepath.Join(dir, "symlink") - if err := os.Symlink(file, symlink); err != nil { - t.Fatal(err) - } - - return file, invalid, symlink, dir -} - -func TestLUtimesNano(t *testing.T) { - file, invalid, symlink, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - before, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - ts := []syscall.Timespec{{0, 0}, {0, 0}} - if err := LUtimesNano(symlink, ts); err != nil { - t.Fatal(err) - } - - symlinkInfo, err := os.Lstat(symlink) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { - t.Fatal("The modification time of the symlink should be different") - } - - fileInfo, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() != fileInfo.ModTime().Unix() { - t.Fatal("The modification time of the file should be same") - } - - if err := LUtimesNano(invalid, ts); err == nil { - t.Fatal("Doesn't return an error on a non-existing file") - } -} diff 
--git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go new file mode 100644 index 00000000..d580584d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -0,0 +1,66 @@ +// Package tailfile provides helper functions to read the last n lines of any +// ReadSeeker. +package tailfile + +import ( + "bytes" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") + +// ErrNonPositiveLinesNumber is an error returned if the requested number of lines is not positive. +var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") + +// TailFile returns the last n lines of reader f (which could be a file). +func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(step, os.SEEK_END); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go deleted file mode 100644 index 719f7289..00000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package tarsum - -import ( - "io" - "io/ioutil" - "os" - "testing" -) - -// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing -func TestTarSumRemoveNonExistent(t *testing.T) { - filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" - reader, err := os.Open(filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to read from %s: %s", filename, err) - } - - expected := len(ts.GetSums()) - - ts.(BuilderContext).Remove("") - ts.(BuilderContext).Remove("Anything") - - if len(ts.GetSums()) != expected { - t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) - } -} - -// Remove a tarsum (in the BuilderContext) -func TestTarSumRemove(t *testing.T) { - filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" - reader, err := os.Open(filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to read from %s: %s", filename, err) - } - - expected := len(ts.GetSums()) - 1 - - ts.(BuilderContext).Remove("etc/sudoers") - - if len(ts.GetSums()) != expected { -
t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums())) - } -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go index 7c2161c2..5abf5e7b 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -4,7 +4,7 @@ import "sort" // FileInfoSumInterface provides an interface for accessing file checksum // information within a tar file. This info is accessed through interface -// so the actual name and sum cannot be medled with. +// so the actual name and sum cannot be melded with. type FileInfoSumInterface interface { // File name Name() string diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go deleted file mode 100644 index bb700d8b..00000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package tarsum - -import "testing" - -func newFileInfoSums() FileInfoSums { - return FileInfoSums{ - fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, - fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, - fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, - fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, - fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, - fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, - } -} - -func TestSortFileInfoSums(t *testing.T) { - dups := newFileInfoSums().GetAllFile("dup1") - if len(dups) != 2 { - t.Errorf("expected length 2, got %d", len(dups)) - } - dups.SortByNames() - if dups[0].Pos() != 4 { - t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) - } - - fis := newFileInfoSums() - expected := "0abcdef1234567890" - fis.SortBySums() - got := fis[0].Sum() - if got != expected { - t.Errorf("Expected %q, got %q", expected, got) - } - - fis = newFileInfoSums() - expected = "dup1" - fis.SortByNames() - gotFis := fis[0] - if gotFis.Name() != expected { - t.Errorf("Expected %q, got %q", expected, gotFis.Name()) - } - // since a duplicate is first, ensure it is ordered first by position too - if gotFis.Pos() != 4 { - t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) - } - - fis = newFileInfoSums() - fis.SortByPos() - if fis[0].Pos() != 0 { - t.Errorf("sorted fileInfoSums by Pos should order them by position.") - } - - fis = newFileInfoSums() - expected = "deadbeef1" - gotFileInfoSum := fis.GetFile("dup1") - if gotFileInfoSum.Sum() != expected { - t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) - } - if fis.GetFile("noPresent") != nil { - t.Errorf("Should have return nil if name not found.") - } - -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go index 0f5783be..4dc89bd4 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -146,7 +146,7 @@ var ( } ) -// DefaultTHash is default TarSum hashing algoritm - "sha256". +// DefaultTHash is default TarSum hashing algorithm - "sha256". 
var DefaultTHash = NewTHash("sha256", sha256.New) type simpleTHash struct { @@ -261,7 +261,7 @@ func (ts *tarSum) Read(buf []byte) (int, error) { return 0, err } - // Filling the tar writter + // Filling the tar writer if _, err = ts.tarW.Write(buf2[:n]); err != nil { return 0, err } diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md index 77927ee7..89b2e49f 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -223,7 +223,7 @@ with matching paths, and orders the list of file sums accordingly [3]. * [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 * [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 -## Acknowledgements +## Acknowledgments Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the TarSum calculation. diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go deleted file mode 100644 index 89626660..00000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go +++ /dev/null @@ -1,648 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -type testLayer struct { - filename string - options *sizedOptions - jsonfile string - gzip bool - tarsum string - version Version - hash THash -} - -var testLayers = []testLayer{ - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: Version0, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - gzip: true, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - // Tests existing version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: Version0, - tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, - { - // Tests next version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, - { - filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", - jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", - tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: 
"tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, - { - // this tar has two files with the same path - filename: "testdata/collision/collision-0.tar", - tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, - { - // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above - filename: "testdata/collision/collision-1.tar", - tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, - { - // this tar has newer of collider-0.tar, ensuring is has different hash - filename: "testdata/collision/collision-2.tar", - tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, - { - // this tar has newer of collider-1.tar, ensuring is has different hash - filename: "testdata/collision/collision-3.tar", - tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", - hash: md5THash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", - hash: sha1Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", - hash: sha224Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", - hash: sha384Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", - hash: sha512Hash, - }, -} - -type sizedOptions struct { - num int64 - size int64 - isRand bool - realFile bool -} - -// make a tar: -// * num is the number of files the tar should have -// * size is the bytes per file -// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) -// * realFile will write to a TempFile, instead of an in memory buffer -func sizedTar(opts sizedOptions) io.Reader { - var ( - fh io.ReadWriter - err error - ) - if opts.realFile { - fh, err = ioutil.TempFile("", "tarsum") - if err != nil { - return nil - } - } else { - fh = bytes.NewBuffer([]byte{}) - } - tarW := tar.NewWriter(fh) - defer tarW.Close() - for i := int64(0); i < opts.num; i++ { - err := tarW.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("/testdata%d", i), - Mode: 0755, - Uid: 0, - Gid: 0, - Size: opts.size, - }) - if err != nil { - return nil - } - var rBuf []byte - if opts.isRand { - rBuf = make([]byte, 8) - _, err = rand.Read(rBuf) - if err != nil { - return nil - } - } else { - rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} - } - - for i := int64(0); i < opts.size/int64(8); i++ { - tarW.Write(rBuf) - } - } - return fh -} - -func emptyTarSum(gzip bool) (TarSum, error) { - reader, writer := io.Pipe() - tarWriter := tar.NewWriter(writer) - - // Immediately close tarWriter and write-end of the - // Pipe in a separate goroutine so we don't block. 
- go func() { - tarWriter.Close() - writer.Close() - }() - - return NewTarSum(reader, !gzip, Version0) -} - -// Test errors on NewTarsumForLabel -func TestNewTarSumForLabelInvalid(t *testing.T) { - reader := strings.NewReader("") - - if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - - if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } -} - -func TestNewTarSumForLabel(t *testing.T) { - - layer := testLayers[0] - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - label := strings.Split(layer.tarsum, ":")[0] - ts, err := NewTarSumForLabel(reader, false, label) - if err != nil { - t.Fatal(err) - } - - // Make sure it actually worked by reading a little bit of it - nbByteToRead := 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) - } -} - -// TestEmptyTar tests that tarsum does not fail to read an empty tar -// and correctly returns the hex digest of an empty hash. -func TestEmptyTar(t *testing.T) { - // Test without gzip. - ts, err := emptyTarSum(false) - if err != nil { - t.Fatal(err) - } - - zeroBlock := make([]byte, 1024) - buf := new(bytes.Buffer) - - n, err := io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { - t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) - } - - expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) - resultSum := ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test with gzip. - ts, err = emptyTarSum(true) - if err != nil { - t.Fatal(err) - } - buf.Reset() - - n, err = io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - bufgz := new(bytes.Buffer) - gz := gzip.NewWriter(bufgz) - n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) - gz.Close() - gzBytes := bufgz.Bytes() - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { - t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test without ever actually writing anything. 
- if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { - t.Fatal(err) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } -} - -var ( - md5THash = NewTHash("md5", md5.New) - sha1Hash = NewTHash("sha1", sha1.New) - sha224Hash = NewTHash("sha224", sha256.New224) - sha384Hash = NewTHash("sha384", sha512.New384) - sha512Hash = NewTHash("sha512", sha512.New) -) - -// Test all the build-in read size : buf8K, buf16K, buf32K and more -func TestTarSumsReadSize(t *testing.T) { - // Test always on the same layer (that is big enough) - layer := testLayers[0] - - for i := 0; i < 5; i++ { - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, layer.version) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - nbByteToRead := (i + 1) * 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) - continue - } - } -} - -func TestTarSums(t *testing.T) { - for _, layer := range testLayers { - var ( - fh io.Reader - err error - ) - if len(layer.filename) > 0 { - fh, err = os.Open(layer.filename) - if err != nil { - t.Errorf("failed to open %s: %s", layer.filename, err) - continue - } - } else if layer.options != nil { - fh = sizedTar(*layer.options) - } else { - // What else is there to test? - t.Errorf("what to do with %#v", layer) - continue - } - if file, ok := fh.(*os.File); ok { - defer file.Close() - } - - var ts TarSum - if layer.hash == nil { - // double negatives! - ts, err = NewTarSum(fh, !layer.gzip, layer.version) - } else { - ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) - } - if err != nil { - t.Errorf("%q :: %q", err, layer.filename) - continue - } - - // Read variable number of bytes to test dynamic buffer - dBuf := make([]byte, 1) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 1B from %s: %s", layer.filename, err) - continue - } - dBuf = make([]byte, 16*1024) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) - continue - } - - // Read and discard remaining bytes - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to copy from %s: %s", layer.filename, err) - continue - } - var gotSum string - if len(layer.jsonfile) > 0 { - jfh, err := os.Open(layer.jsonfile) - if err != nil { - t.Errorf("failed to open %s: %s", layer.jsonfile, err) - continue - } - buf, err := ioutil.ReadAll(jfh) - if err != nil { - t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) - continue - } - gotSum = ts.Sum(buf) - } else { - gotSum = ts.Sum(nil) - } - - if layer.tarsum != gotSum { - t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) - } - var expectedHashName string - if layer.hash != nil { - expectedHashName = layer.hash.Name() - } else { - expectedHashName = DefaultTHash.Name() - } - if expectedHashName != ts.Hash().Name() { - t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) - } - } -} - -func TestIteration(t *testing.T) { - headerTests := []struct { - expectedSum string // TODO(vbatts) it would be nice to get individual sums of each - version Version - hdr *tar.Header - data []byte - }{ - { - "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", - Version0, - &tar.Header{ - Name: "file.txt", 
- Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", - VersionDev, - &tar.Header{ - Name: "file.txt", - Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", - VersionDev, - &tar.Header{ - Name: "another.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.key1": "value1", - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.KEY1": "value1", // adding different case to ensure different sum - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", - Version0, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.NOT": "CALCULATED", - }, - }, - []byte("test"), - }, - } - for _, htest := range headerTests { - s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) - if err != nil { - t.Fatal(err) - } - - if s != htest.expectedSum { - t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) - } - } - -} - -func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { - buf := bytes.NewBuffer(nil) - // first build our test tar - tw := tar.NewWriter(buf) - if err := tw.WriteHeader(h); err != nil { - return "", err - } - if _, err := tw.Write(data); err != nil { - return "", err - } - tw.Close() - - ts, err := NewTarSum(buf, true, v) - if err != nil { - return "", err - } - tr := tar.NewReader(ts) - for { - hdr, err := tr.Next() - if hdr == nil || err == io.EOF { - // Signals the end of the archive. 
- break - } - if err != nil { - return "", err - } - if _, err = io.Copy(ioutil.Discard, tr); err != nil { - return "", err - } - } - return ts.Sum(nil), nil -} - -func Benchmark9kTar(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, true, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -func Benchmark9kTarGzip(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) -} - -func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { - var fh *os.File - tarReader := sizedTar(opts) - if br, ok := tarReader.(*os.File); ok { - fh = br - } - defer os.Remove(fh.Name()) - defer fh.Close() - - b.SetBytes(opts.size * opts.num) - b.ResetTimer() - for i := 0; i < b.N; i++ { - ts, err := NewTarSum(fh, !isGzip, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - fh.Seek(0, 0) - } -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go deleted file mode 100644 index 88e0a578..00000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package tarsum - -import ( - "testing" -) - -func TestVersionLabelForChecksum(t *testing.T) { - version := VersionLabelForChecksum("tarsum+sha256:deadbeef") - if version != "tarsum" { - t.Fatalf("Version should have been 'tarsum', was %v", version) - } - version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") - if version != "tarsum.v1" { - t.Fatalf("Version should have been 'tarsum.v1', was %v", version) - } - version = VersionLabelForChecksum("something+somethingelse") - if version != "something" { - t.Fatalf("Version should have been 'something', was %v", version) - } - version = VersionLabelForChecksum("invalidChecksum") - if version != "" { - t.Fatalf("Version should have been empty, was %v", version) - } -} - -func TestVersion(t *testing.T) { - expected := "tarsum" - var v Version - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - 
expected = "tarsum.v1" - v = 1 - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - expected = "tarsum.dev" - v = 2 - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } -} - -func TestGetVersion(t *testing.T) { - testSet := []struct { - Str string - Expected Version - }{ - {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, - {"tarsum+sha256", Version0}, - {"tarsum", Version0}, - {"tarsum.dev", VersionDev}, - {"tarsum.dev+sha256:deadbeef", VersionDev}, - } - - for _, ts := range testSet { - v, err := GetVersionFromTarsum(ts.Str) - if err != nil { - t.Fatalf("%q : %s", err, ts.Str) - } - if v != ts.Expected { - t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) - } - } - - // test one that does not exist, to ensure it errors - str := "weak+md5:abcdeabcde" - _, err := GetVersionFromTarsum(str) - if err != ErrNotVersion { - t.Fatalf("%q : %s", err, str) - } -} - -func TestGetVersions(t *testing.T) { - expected := []Version{ - Version0, - Version1, - VersionDev, - } - versions := GetVersions() - if len(versions) != len(expected) { - t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) - } - if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { - t.Fatalf("Expected [%v], got [%v]", expected, versions) - } -} - -func containsVersion(versions []Version, version Version) bool { - for _, v := range versions { - if v == version { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go new file mode 100644 index 00000000..f5262bcc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. 
+func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, byte(key[0])) + } + } + return codes, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go index 1005084f..a22cd9d1 100644 --- a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -27,7 +27,6 @@ func MakeRaw(fd uintptr) (*State, error) { newState := oldState.termios C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - newState.Oflag = newState.Oflag | C.OPOST if err := tcset(fd, &newState); err != 0 { return nil, err } diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go index 7912ae43..11ed2093 100644 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ b/vendor/github.com/docker/docker/pkg/term/term.go @@ -51,7 +51,7 @@ func GetFdInfo(in interface{}) (uintptr, bool) { func GetWinsize(fd uintptr) (*Winsize, error) { ws := &Winsize{} _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 + // Skip errno = 0 if err == 0 { return ws, nil } @@ -61,7 +61,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // SetWinsize tries to set the specified window size for the specified file descriptor. func SetWinsize(fd uintptr, ws *Winsize) error { _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 + // Skip errno = 0 if err == 0 { return nil } @@ -127,6 +127,5 @@ func handleInterrupt(fd uintptr, state *State) { go func() { _ = <-sigchan RestoreTerminal(fd, state) - os.Exit(0) }() } diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index 04870d1b..00000000 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build windows - -package term - -import ( - "fmt" - "io" - "os" - "os/signal" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stedrr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - switch { - case os.Getenv("ConEmuANSI") == "ON": - // The ConEmu shell emulates ANSI well by default. - return os.Stdin, os.Stdout, os.Stderr - case os.Getenv("MSYSTEM") != "": - // MSYS (mingw) does not emulate ANSI well. - return windows.ConsoleStreams() - default: - if useNativeConsole() { - return os.Stdin, os.Stdout, os.Stderr - } - return windows.ConsoleStreams() - } -} - -// useNativeConsole determines if the docker client should use the built-in -// console which supports ANSI emulation, or fall-back to the golang emulator -// (github.com/azure/go-ansiterm). 
-func useNativeConsole() bool { - osv, err := system.GetOSVersion() - if err != nil { - return false - } - - // Native console is not available major version 10 - if osv.MajorVersion < 10 { - return false - } - - // Must have a late pre-release TP4 build of Windows Server 2016/Windows 10 TH2 or later - if osv.Build < 10578 { - return false - } - - // Environment variable override - if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { - if e == "1" { - return true - } - return false - } - - // Get the handle to stdout - stdOutHandle, err := syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE) - if err != nil { - return false - } - - // Get the console mode from the consoles stdout handle - var mode uint32 - if err := syscall.GetConsoleMode(stdOutHandle, &mode); err != nil { - return false - } - - // Legacy mode does not have native ANSI emulation. - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - const enableVirtualTerminalProcessing = 0x0004 - if mode&enableVirtualTerminalProcessing == 0 { - return false - } - - // TODO Windows (Post TP4). The native emulator still has issues which - // mean it shouldn't be enabled for everyone. Change this next line to true - // to change the default to "enable if available". In the meantime, users - // can still try it out by using USE_NATIVE_CONSOLE env variable. - return false -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - x: 0, - y: 0} - - // Note: GetWinsize is called frequently -- uncomment only for excessive details - // logrus.Debugf("[windows] GetWinsize: Console(%v)", info.String()) - // logrus.Debugf("[windows] GetWinsize: Width(%v), Height(%v), x(%v), y(%v)", winsize.Width, winsize.Height, winsize.x, winsize.y) - return winsize, nil -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - - // Ensure the requested dimensions are no larger than the maximum window size - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return err - } - - if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) { - return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allow: (%v,%v)", - ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y) - } - - // Narrow the sizes to that used by Windows - width := winterm.SHORT(ws.Width) - height := winterm.SHORT(ws.Height) - - // Set the dimensions while ensuring they remain within the bounds of the backing console buffer - // -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs, - // shift the upper left just enough to keep the new window within the buffer. 
- rect := info.Window - if width < rect.Right-rect.Left+1 { - rect.Right = rect.Left + width - 1 - } else if width > rect.Right-rect.Left+1 { - rect.Right = rect.Left + width - 1 - if rect.Right >= info.Size.X { - rect.Left = info.Size.X - width - rect.Right = info.Size.X - 1 - } - } - - if height < rect.Bottom-rect.Top+1 { - rect.Bottom = rect.Top + height - 1 - } else if height > rect.Bottom-rect.Top+1 { - rect.Bottom = rect.Top + height - 1 - if rect.Bottom >= info.Size.Y { - rect.Top = info.Size.Y - height - rect.Bottom = info.Size.Y - 1 - } - } - logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect) - - return winterm.SetConsoleWindowInfo(fd, true, rect) -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - return &State{mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. 
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - mode := state.mode - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go new file mode 100644 index 00000000..ed843ad6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored.
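+// Editorial usage sketch (not part of the upstream file): callers typically +// save the returned state and restore it when done, e.g. +// oldState, err := term.MakeRaw(os.Stdin.Fd()) +// if err == nil { defer term.RestoreTerminal(os.Stdin.Fd(), oldState) }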
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index bb47e120..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,256 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte - // TODO(azlinux): Remove this and hard-code the string -- it is not going to change - escapeSequence []byte -} - -func newAnsiReader(nFile int) *ansiReader { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. 
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, ar.escapeSequence) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("Unexpected copy length encountered.") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[winterm.WORD]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[winterm.WORD]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell. - // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key. - // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s. - // <Ctrl>-S Suspends printing on the screen (does not stop the program). - // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key. - // <Ctrl>-E Quits current command and creates a core - - } - - // <Alt>+Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
-func formatVirtualKey(key winterm.WORD, controlState winterm.DWORD, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState winterm.DWORD) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index 9f3232c0..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build windows - -package windows - -import ( - "io/ioutil" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -func newAnsiWriter(nFile int) *ansiWriter { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index 3711d988..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" -) - -// ConsoleStreams returns a wrapped version for each standard stream referencing a console, -// that handles ANSI character sequences. -func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if IsConsole(os.Stdout.Fd()) { - stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if IsConsole(os.Stderr.Fd()) { - stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index bf4c7b50..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windows diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go deleted file mode 100644 index 52aeab54..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go +++ /dev/null @@ -1,3 +0,0 @@ -// This file is necessary to pass the Docker tests. 
- -package windows diff --git a/vendor/github.com/docker/docker/pkg/timeutils/json_test.go b/vendor/github.com/docker/docker/pkg/timeutils/json_test.go deleted file mode 100644 index 1ff33317..00000000 --- a/vendor/github.com/docker/docker/pkg/timeutils/json_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package timeutils - -import ( - "testing" - "time" -) - -// Testing to ensure 'year' fields is between 0 and 9999 -func TestFastMarshalJSONWithInvalidDate(t *testing.T) { - aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) - json, err := FastMarshalJSON(aTime) - if err == nil { - t.Fatalf("FastMarshalJSON should throw an error, but was '%v'", json) - } - anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) - json, err = FastMarshalJSON(anotherTime) - if err == nil { - t.Fatalf("FastMarshalJSON should throw an error, but was '%v'", json) - } - -} - -func TestFastMarshalJSON(t *testing.T) { - aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) - json, err := FastMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected := "\"2015-05-29T11:01:02.000000003Z\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } - - location, err := time.LoadLocation("Europe/Paris") - if err != nil { - t.Fatal(err) - } - aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) - json, err = FastMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected = "\"2015-05-29T11:01:02.000000003+02:00\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } -} diff --git a/vendor/github.com/docker/docker/pkg/timeutils/utils.go b/vendor/github.com/docker/docker/pkg/timeutils/utils.go deleted file mode 100644 index 8437f124..00000000 --- a/vendor/github.com/docker/docker/pkg/timeutils/utils.go +++ /dev/null @@ -1,36 +0,0 @@ -package timeutils - -import ( - "strconv" - "strings" - "time" -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. 
-func GetTimestamp(value string, reference time.Time) string { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10) - } - - var format string - if strings.Contains(value, ".") { - format = time.RFC3339Nano - } else { - format = time.RFC3339 - } - - loc := time.FixedZone(time.Now().Zone()) - if len(value) < len(format) { - format = format[:len(value)] - } - t, err := time.ParseInLocation(format, value, loc) - if err != nil { - return value - } - return strconv.FormatInt(t.Unix(), 10) -} diff --git a/vendor/github.com/docker/docker/pkg/timeutils/utils_test.go b/vendor/github.com/docker/docker/pkg/timeutils/utils_test.go deleted file mode 100644 index f71dcb53..00000000 --- a/vendor/github.com/docker/docker/pkg/timeutils/utils_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package timeutils - -import ( - "fmt" - "testing" - "time" -) - -func TestGetTimestamp(t *testing.T) { - now := time.Now() - cases := []struct{ in, expected string }{ - {"0", "-62167305600"}, // 0 gets parsed year 0 - - // Partial RFC3339 strings get parsed with second precision - {"2006-01-02T15:04:05.999999999+07:00", "1136189045"}, - {"2006-01-02T15:04:05.999999999Z", "1136214245"}, - {"2006-01-02T15:04:05.999999999", "1136214245"}, - {"2006-01-02T15:04:05", "1136214245"}, - {"2006-01-02T15:04", "1136214240"}, - {"2006-01-02T15", "1136214000"}, - {"2006-01-02T", "1136160000"}, - {"2006-01-02", "1136160000"}, - {"2006", "1136073600"}, - {"2015-05-13T20:39:09Z", "1431549549"}, - - // unix timestamps returned as is - {"1136073600", "1136073600"}, - - // Durations - {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix())}, - {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix())}, - {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix())}, - - // String fallback - {"invalid", "invalid"}, - } - - for _, c := range cases { - o := GetTimestamp(c.in, now) - if o != c.expected { - t.Fatalf("wrong value for '%s'. expected:'%s' got:'%s'", c.in, c.expected, o) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go new file mode 100644 index 00000000..02610b8b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go @@ -0,0 +1,137 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. 
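Reviewer note (not part of the diff): the package comment above describes the whole truncindex API, so here is a short behavioral sketch with hypothetical short IDs (real callers pass 64-character container/image IDs); the struct and methods follow below:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	// Two IDs share the "dead" prefix; one is unique under "cafe".
	idx := truncindex.NewTruncIndex([]string{"deadbeef1111", "deadbeef2222", "cafe0000"})

	id, err := idx.Get("cafe") // unique prefix resolves to the full ID
	fmt.Println(id, err)       // cafe0000 <nil>

	_, err = idx.Get("dead") // two matches: rejected with ErrAmbiguousPrefix
	fmt.Println(err)
}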
+type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes it with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our IDs are always 64 characters long + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addID(id); err != nil { + return err + } + return nil +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is returned. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is returned. +func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var ( + id string + ) + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs, and passes each of them to the given handler. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/ulimit/ulimit.go b/vendor/github.com/docker/docker/pkg/ulimit/ulimit.go deleted file mode 100644 index 8fb0d804..00000000 --- a/vendor/github.com/docker/docker/pkg/ulimit/ulimit.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package ulimit provides structure and helper function to parse and represent -// resource limits (Rlimit and Ulimit, its human friendly version). -package ulimit - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all.
- // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// Parse parses and returns a Ulimit from the specified string. -func Parse(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - limitVals := strings.SplitN(parts[1], ":", 2) - if len(limitVals) > 2 { - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - soft, err := strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - - hard := soft // in case no hard was set - if len(limitVals) == 2 { - hard, err = strconv.ParseInt(limitVals[1], 10, 64) - } - if soft > hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. 
-func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/docker/docker/pkg/ulimit/ulimit_test.go b/vendor/github.com/docker/docker/pkg/ulimit/ulimit_test.go deleted file mode 100644 index 1e8c881f..00000000 --- a/vendor/github.com/docker/docker/pkg/ulimit/ulimit_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package ulimit - -import "testing" - -func TestParseValid(t *testing.T) { - u1 := &Ulimit{"nofile", 1024, 512} - if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 { - t.Fatalf("expected %q, but got %q", u1, u2) - } -} - -func TestParseInvalidLimitType(t *testing.T) { - if _, err := Parse("notarealtype=1024:1024"); err == nil { - t.Fatalf("expected error on invalid ulimit type") - } -} - -func TestParseBadFormat(t *testing.T) { - if _, err := Parse("nofile:1024:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile="); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:"); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } -} - -func TestParseHardLessThanSoft(t *testing.T) { - if _, err := Parse("nofile:1024:1"); err == nil { - t.Fatal("expected error on hard limit less than soft limit") - } -} - -func TestParseInvalidValueType(t *testing.T) { - if _, err := Parse("nofile:asdf"); err == nil { - t.Fatal("expected error on bad value type") - } -} - -func TestStringOutput(t *testing.T) { - u := &Ulimit{"nofile", 1024, 512} - if s := u.String(); s != "nofile=512:1024" { - t.Fatal("expected String to return nofile=512:1024, but got", s) - } -} diff --git a/vendor/github.com/docker/docker/pkg/units/duration_test.go b/vendor/github.com/docker/docker/pkg/units/duration_test.go deleted file mode 100644 index fcfb6b7b..00000000 --- a/vendor/github.com/docker/docker/pkg/units/duration_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package units - -import ( - "testing" - "time" -) - -func TestHumanDuration(t *testing.T) { - // Useful duration abstractions - day := 24 * time.Hour - week := 7 * day - month := 30 * day - year := 365 * day - - assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) - assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) - assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) - assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) - assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) - assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) - assertEquals(t, "2 days", HumanDuration(2*day)) - assertEquals(t, "7 days", HumanDuration(7*day)) - assertEquals(t, "13 days", 
HumanDuration(13*day+5*time.Hour)) - assertEquals(t, "2 weeks", HumanDuration(2*week)) - assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) - assertEquals(t, "3 weeks", HumanDuration(3*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) - assertEquals(t, "4 weeks", HumanDuration(1*month)) - assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) - assertEquals(t, "8 weeks", HumanDuration(2*month)) - assertEquals(t, "3 months", HumanDuration(3*month+1*week)) - assertEquals(t, "5 months", HumanDuration(5*month+2*week)) - assertEquals(t, "13 months", HumanDuration(13*month)) - assertEquals(t, "23 months", HumanDuration(23*month)) - assertEquals(t, "24 months", HumanDuration(24*month)) - assertEquals(t, "2 years", HumanDuration(24*month+2*week)) - assertEquals(t, "3 years", HumanDuration(3*year+2*month)) -} diff --git a/vendor/github.com/docker/docker/pkg/units/size_test.go b/vendor/github.com/docker/docker/pkg/units/size_test.go deleted file mode 100644 index 67c3b81e..00000000 --- a/vendor/github.com/docker/docker/pkg/units/size_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "reflect" - "runtime" - "strings" - "testing" -) - -func TestBytesSize(t *testing.T) { - assertEquals(t, "1 KiB", BytesSize(1024)) - assertEquals(t, "1 MiB", BytesSize(1024*1024)) - assertEquals(t, "1 MiB", BytesSize(1048576)) - assertEquals(t, "2 MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) -} - -func TestHumanSize(t *testing.T) { - assertEquals(t, "1 kB", HumanSize(1000)) - assertEquals(t, "1.024 kB", HumanSize(1024)) - assertEquals(t, "1 MB", HumanSize(1000000)) - assertEquals(t, "1.049 MB", HumanSize(1048576)) - assertEquals(t, "2 MB", HumanSize(2*MB)) - assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) -} - -func TestFromHumanSize(t *testing.T) { - assertSuccessEquals(t, 32, FromHumanSize, "32") - assertSuccessEquals(t, 32, FromHumanSize, "32b") - assertSuccessEquals(t, 32, FromHumanSize, "32B") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") - assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") - assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") - assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") - assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") - - assertError(t, FromHumanSize, "") - assertError(t, FromHumanSize, "hello") - assertError(t, FromHumanSize, "-32") - assertError(t, FromHumanSize, "32.3") - assertError(t, FromHumanSize, " 32 ") - assertError(t, FromHumanSize, "32.3Kb") - assertError(t, FromHumanSize, "32 mb") - assertError(t, FromHumanSize, "32m b") - assertError(t, FromHumanSize, "32bm") -} - -func TestRAMInBytes(t *testing.T) { - assertSuccessEquals(t, 32, RAMInBytes, "32") - assertSuccessEquals(t, 32, RAMInBytes, "32b") - assertSuccessEquals(t, 32, RAMInBytes, "32B") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") - assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") - assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") 
- assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") - - assertError(t, RAMInBytes, "") - assertError(t, RAMInBytes, "hello") - assertError(t, RAMInBytes, "-32") - assertError(t, RAMInBytes, "32.3") - assertError(t, RAMInBytes, " 32 ") - assertError(t, RAMInBytes, "32.3Kb") - assertError(t, RAMInBytes, "32 mb") - assertError(t, RAMInBytes, "32m b") - assertError(t, RAMInBytes, "32bm") -} - -func assertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - t.Errorf("Expected '%v' but got '%v'", expected, actual) - } -} - -// func that maps to the parse function signatures as testing abstraction -type parseFn func(string) (int64, error) - -// Define 'String()' for pretty-print -func (fn parseFn) String() string { - fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() - return fnName[strings.LastIndex(fnName, ".")+1:] -} - -func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { - res, err := fn(arg) - if err != nil || res != expected { - t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) - } -} - -func assertError(t *testing.T, fn parseFn, arg string) { - res, err := fn(arg) - if err == nil && res != -1 { - t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) - } -} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go index f7094b1f..1135a4c6 100644 --- a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -11,7 +11,7 @@ var ( validPrefixes = map[string][]string{ "url": {"http://", "https://"}, "git": {"git://", "github.com/", "git@"}, - "transport": {"tcp://", "udp://", "unix://"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://"}, } urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") ) @@ -35,7 +35,7 @@ func IsGitTransport(str string) bool { return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") } -// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL. +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. 
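Reviewer note (not part of the diff): the only behavioral change in urlutil is that tcp+tls:// now counts as a transport prefix. A quick sketch with hypothetical endpoints; only the scheme prefix matters to the check:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/urlutil"
)

func main() {
	for _, s := range []string{
		"tcp://127.0.0.1:2375",
		"tcp+tls://127.0.0.1:2376", // rejected before this change, accepted now
		"udp://127.0.0.1:514",
		"unix:///var/run/docker.sock",
		"fd://", // never a registered transport prefix, still false
	} {
		fmt.Println(s, urlutil.IsTransportURL(s))
	}
}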
func IsTransportURL(str string) bool { return checkURL(str, "transport") } diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go deleted file mode 100644 index bb89d8b5..00000000 --- a/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package urlutil - -import "testing" - -var ( - gitUrls = []string{ - "git://github.com/docker/docker", - "git@github.com:docker/docker.git", - "git@bitbucket.org:atlassianlabs/atlassian-docker.git", - "https://github.com/docker/docker.git", - "http://github.com/docker/docker.git", - "http://github.com/docker/docker.git#branch", - "http://github.com/docker/docker.git#:dir", - } - incompleteGitUrls = []string{ - "github.com/docker/docker", - } - invalidGitUrls = []string{ - "http://github.com/docker/docker.git:#branch", - } -) - -func TestValidGitTransport(t *testing.T) { - for _, url := range gitUrls { - if IsGitTransport(url) == false { - t.Fatalf("%q should be detected as valid Git prefix", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitTransport(url) == true { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} - -func TestIsGIT(t *testing.T) { - for _, url := range gitUrls { - if IsGitURL(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitURL(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } - - for _, url := range invalidGitUrls { - if IsGitURL(url) == true { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go index a4109748..1137db51 100644 --- a/vendor/github.com/docker/docker/pkg/useragent/useragent.go +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -29,8 +29,8 @@ func (vi *VersionInfo) isValid() bool { // // Each VersionInfo will be converted to a string in the format of // "product/version", where the "product" is get from the name field, while -// version is get from the version field. Several pieces of verson information -// will be concatinated and separated by space. +// version is get from the version field. Several pieces of version information +// will be concatenated and separated by space. // // Example: // AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go deleted file mode 100644 index 0ad7243a..00000000 --- a/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package useragent - -import "testing" - -func TestVersionInfo(t *testing.T) { - vi := VersionInfo{"foo", "bar"} - if !vi.isValid() { - t.Fatalf("VersionInfo should be valid") - } - vi = VersionInfo{"", "bar"} - if vi.isValid() { - t.Fatalf("Expected VersionInfo to be invalid") - } - vi = VersionInfo{"foo", ""} - if vi.isValid() { - t.Fatalf("Expected VersionInfo to be invalid") - } -} - -func TestAppendVersions(t *testing.T) { - vis := []VersionInfo{ - {"foo", "1.0"}, - {"bar", "0.1"}, - {"pi", "3.1.4"}, - } - v := AppendVersions("base", vis...) 
- expect := "base foo/1.0 bar/0.1 pi/3.1.4" - if v != expect { - t.Fatalf("expected %q, got %q", expect, v) - } -} diff --git a/vendor/github.com/docker/docker/pkg/version/version.go b/vendor/github.com/docker/docker/pkg/version/version.go index bd5ec7a8..c001279f 100644 --- a/vendor/github.com/docker/docker/pkg/version/version.go +++ b/vendor/github.com/docker/docker/pkg/version/version.go @@ -37,6 +37,11 @@ func (v Version) compareTo(other Version) int { return 0 } +// String returns the version string +func (v Version) String() string { + return string(v) +} + // LessThan checks if a version is less than another func (v Version) LessThan(other Version) bool { return v.compareTo(other) == -1 diff --git a/vendor/github.com/docker/docker/pkg/version/version_test.go b/vendor/github.com/docker/docker/pkg/version/version_test.go deleted file mode 100644 index c02ec40f..00000000 --- a/vendor/github.com/docker/docker/pkg/version/version_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package version - -import ( - "testing" -) - -func assertVersion(t *testing.T, a, b string, result int) { - if r := Version(a).compareTo(Version(b)); r != result { - t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) - } -} - -func TestCompareVersion(t *testing.T) { - assertVersion(t, "1.12", "1.12", 0) - assertVersion(t, "1.0.0", "1", 0) - assertVersion(t, "1", "1.0.0", 0) - assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) - assertVersion(t, "1", "1.0.1", -1) - assertVersion(t, "1.0.1", "1", 1) - assertVersion(t, "1.0.1", "1.0.2", -1) - assertVersion(t, "1.0.2", "1.0.3", -1) - assertVersion(t, "1.0.3", "1.1", -1) - assertVersion(t, "1.1", "1.1.1", -1) - assertVersion(t, "1.1.1", "1.1.2", -1) - assertVersion(t, "1.1.2", "1.2", -1) - -} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go new file mode 100644 index 00000000..51dfa5cf --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go @@ -0,0 +1,115 @@ +// +build linux + +package apparmor + +import ( + "bufio" + "io" + "os" + "path" + "strings" + + "github.com/docker/docker/pkg/aaparser" + "github.com/docker/docker/utils/templates" +) + +var ( + // profileDirectory is the file store for apparmor profiles and macros. + profileDirectory = "/etc/apparmor.d" + // defaultProfilePath is the default path for the apparmor profile to be saved. + defaultProfilePath = path.Join(profileDirectory, "docker") +) + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from ProfileData. 
+func (p *profileData) generateDefault(out io.Writer) error { + compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + if err != nil { + return err + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include <tunables/global>") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include <abstractions/base>") + } + + ver, err := aaparser.GetVersion() + if err != nil { + return err + } + p.Version = ver + + if err := compiled.Execute(out, p); err != nil { + return err + } + return nil +} + +// macroExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile and installs it in the +// ProfileDirectory with `apparmor_parser`. +func InstallDefault(name string) error { + // Make sure the path where they want to save the profile exists + if err := os.MkdirAll(profileDirectory, 0755); err != nil { + return err + } + + p := profileData{ + Name: name, + } + + f, err := os.OpenFile(defaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := p.generateDefault(f); err != nil { + f.Close() + return err + } + f.Close() + + if err := aaparser.LoadProfile(defaultProfilePath); err != nil { + return err + } + + return nil +} + +// IsLoaded checks if a passed profile has been loaded into the kernel. +func IsLoaded(name string) error { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return err + } + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err != nil { + return err + } + if strings.HasPrefix(p, name+" ") { + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/template.go b/vendor/github.com/docker/docker/profiles/apparmor/template.go new file mode 100644 index 00000000..ada33bf0 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers.
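Reviewer note (not part of the diff): putting the new package together, InstallDefault renders the template constant that follows, writes it to /etc/apparmor.d/docker, and loads it with apparmor_parser; IsLoaded then checks the kernel's view. A sketch, assuming a Linux host with AppArmor enabled, root privileges, and docker-default as the profile name:

package main

import (
	"log"

	"github.com/docker/docker/profiles/apparmor"
)

func main() {
	// Generate, write out, and apparmor_parser-load the default container profile.
	if err := apparmor.InstallDefault("docker-default"); err != nil {
		log.Fatal(err)
	}
	// Scan /sys/kernel/security/apparmor/profiles for the profile;
	// a nil error means the kernel reports it as loaded.
	if err := apparmor.IsLoaded("docker-default"); err != nil {
		log.Fatal(err)
	}
}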
+const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc/<number>/** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer=docker-default, +{{end}} +} +` diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json new file mode 100755 index 00000000..5c70f88a --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -0,0 +1,1628 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": [ + "SCMP_ARCH_X86_64", + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ], + "syscalls": [ + { + "name": "accept", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "accept4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "access", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "alarm", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "arch_prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "bind", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "brk", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capset", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chroot", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_getres", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_nanosleep", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "connect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "copy_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "creat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup3", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + {
+ "name": "epoll_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_create1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_pwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execve", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execveat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit_group", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "faccessat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64_64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fallocate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fanotify_init", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fanotify_mark", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchownat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fdatasync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fork", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futex", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futimesat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcpu", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcwd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents64", + "action": "SCMP_ACT_ALLOW", 
+ "args": [] + }, + { + "name": "getegid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getegid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpeername", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgrp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getppid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrandom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrusage", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettimeofday", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_add_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_rm_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_cancel", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_destroy", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_getevents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_get", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_set", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_setup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_submit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ipc", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "kill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + 
{ + "name": "lchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "link", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "linkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listen", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "llistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_llseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "madvise", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "memfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mincore", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdirat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mprotect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_getsetattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_notify", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedreceive", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedsend", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mremap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgrcv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgsnd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "nanosleep", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "newfstatat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_newselect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "openat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pause", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + 
"valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "pipe", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pipe2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "poll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ppoll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pread64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "preadv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prlimit64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pselect6", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwrite64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwritev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "read", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readahead", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvfrom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "remap_file_pages", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "removexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rename", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "restart_syscall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rmdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigaction", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigpending", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigprocmask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigsuspend", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigtimedwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_tgsigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_max", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_min", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_rr_get_interval", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_yield", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "seccomp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "select", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semtimedop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "send", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendto", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setdomainname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sethostname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setreuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setreuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_tid_address", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setxattr", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmdt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shutdown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigaltstack", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socket", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socketcall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socketpair", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "splice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syncfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sysinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syslog", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tee", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tgkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "time", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_delete", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_getoverrun", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "times", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ugetrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "umask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "uname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimensat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimes", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vfork", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vhangup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vmsplice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "wait4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "write", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "writev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "modify_ldt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "breakpoint", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "cacheflush", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_tls", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go new file mode 100644 index 00000000..bf565947 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile, "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go new file mode 100644 index 00000000..0718d840 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -0,0 +1,74 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "fmt" + + "github.com/docker/engine-api/types" + "github.com/opencontainers/specs/specs-go" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile() (*specs.Seccomp, error) { + return setupSeccomp(DefaultProfile) +} + +// LoadProfile takes a file path and decodes the seccomp profile. 
+func LoadProfile(body string) (*specs.Seccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + + return setupSeccomp(&config) +} + +func setupSeccomp(config *types.Seccomp) (newConfig *specs.Seccomp, err error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig = &specs.Seccomp{} + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) > 0 { + for _, arch := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(arch)) + } + } + + newConfig.DefaultAction = specs.Action(config.DefaultAction) + + // Loop through all syscall blocks and convert them to libcontainer format + for _, call := range config.Syscalls { + newCall := specs.Syscall{ + Name: call.Name, + Action: specs.Action(call.Action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range call.Args { + newArg := specs.Arg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: specs.Operator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + } + + newConfig.Syscalls = append(newConfig.Syscalls, newCall) + } + + return newConfig, nil +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go new file mode 100644 index 00000000..4fad7a6c --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -0,0 +1,1659 @@ +// +build linux,seccomp + +package seccomp + +import ( + "syscall" + + "github.com/docker/engine-api/types" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +func arches() []types.Arch { + var native, err = libseccomp.GetNativeArch() + if err != nil { + return []types.Arch{} + } + var a = native.String() + switch a { + case "amd64": + return []types.Arch{types.ArchX86_64, types.ArchX86, types.ArchX32} + case "arm64": + return []types.Arch{types.ArchARM, types.ArchAARCH64} + case "mips64": + return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} + case "mips64n32": + return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} + case "mipsel64": + return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} + case "mipsel64n32": + return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} + default: + return []types.Arch{} + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. 
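// Illustrative note, not part of this patch: the profile below is a pure
// whitelist. DefaultAction is ActErrno, so any syscall not listed fails with
// an errno (EPERM) instead of killing the process. Almost all entries carry
// no argument filters; the notable exception is clone(2), whose OpMaskedEqual
// rule requires
//
//	flags & (CLONE_NEWNS|CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWNET) == 0
//
// i.e. clone is permitted only when it does not request any new namespaces.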
+var DefaultProfile = &types.Seccomp{ + DefaultAction: types.ActErrno, + Architectures: arches(), + Syscalls: []*types.Syscall{ + { + Name: "accept", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "accept4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "access", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "alarm", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "arch_prctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "bind", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "brk", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "capget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "capset", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chmod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chroot", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_getres", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_nanosleep", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clone", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + }, + { + Name: "close", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "connect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "copy_file_range", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "creat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup3", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_create1", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_ctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_ctl_old", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_pwait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_wait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_wait_old", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "eventfd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "eventfd2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "execve", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "execveat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "exit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "exit_group", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "faccessat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fadvise64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fadvise64_64", + Action: types.ActAllow, + Args: 
[]*types.Arg{}, + }, + { + Name: "fallocate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fanotify_init", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fanotify_mark", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchmod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchmodat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchownat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fcntl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fcntl64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fdatasync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fgetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "flistxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "flock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fork", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fremovexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fsetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatfs64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fsync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ftruncate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ftruncate64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "futex", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "futimesat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getcpu", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getcwd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getdents", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getdents64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getegid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getegid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "geteuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "geteuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgroups", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgroups32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getitimer", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpeername", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpgrp", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpid", + Action: types.ActAllow, + Args: 
[]*types.Arg{}, + }, + { + Name: "getppid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpriority", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrandom", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "get_robust_list", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrusage", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsockname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsockopt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "get_thread_area", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "gettid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "gettimeofday", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_add_watch", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_init", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_init1", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_rm_watch", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_cancel", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_destroy", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_getevents", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioprio_get", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioprio_set", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_setup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_submit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ipc", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "kill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lchown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lchown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lgetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "link", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "linkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "listen", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "listxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "llistxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "_llseek", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lremovexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lseek", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + 
Name: "lsetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lstat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lstat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "madvise", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "memfd_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mincore", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mkdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mkdirat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mknod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mknodat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlock2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlockall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mmap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mmap2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mprotect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_getsetattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_notify", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_open", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_timedreceive", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_timedsend", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_unlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mremap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgrcv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgsnd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munlock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munlockall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munmap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "nanosleep", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "newfstatat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "_newselect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "open", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "openat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pause", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "pipe", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pipe2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "poll", + Action: types.ActAllow, + Args: []*types.Arg{}, 
+ }, + { + Name: "ppoll", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "prctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pread64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "preadv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "prlimit64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pselect6", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pwrite64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pwritev", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "read", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readahead", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvfrom", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvmmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "remap_file_pages", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "removexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rename", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "renameat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "renameat2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "restart_syscall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rmdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigaction", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigpending", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigprocmask", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigqueueinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigreturn", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigsuspend", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigtimedwait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_tgsigqueueinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getaffinity", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getparam", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_get_priority_max", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_get_priority_min", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getscheduler", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_rr_get_interval", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setaffinity", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setparam", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setscheduler", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + 
Name: "sched_yield", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "seccomp", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "select", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semop", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semtimedop", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "send", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendfile", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendfile64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendmmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendto", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setdomainname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgroups", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgroups32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sethostname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setitimer", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setpgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setpriority", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setregid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setregid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setreuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setreuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_robust_list", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setsid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setsockopt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_thread_area", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_tid_address", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmctl", + Action: types.ActAllow, 
+ Args: []*types.Arg{}, + }, + { + Name: "shmdt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shutdown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sigaltstack", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "signalfd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "signalfd4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sigreturn", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socket", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socketcall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socketpair", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "splice", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "stat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "stat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "statfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "statfs64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "symlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "symlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sync_file_range", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "syncfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sysinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "syslog", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tee", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tgkill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "time", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_delete", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_settime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_getoverrun", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_settime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "times", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tkill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "truncate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "truncate64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ugetrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "umask", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "uname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "unlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "unlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utimensat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utimes", + Action: 
types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vfork", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vhangup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vmsplice", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "wait4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "waitid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "waitpid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "write", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "writev", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + // i386 specific syscalls + { + Name: "modify_ldt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + // arm specific syscalls + { + Name: "breakpoint", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "cacheflush", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_tls", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + }, +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 00000000..64963292 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,10 @@ +// +build linux,!seccomp + +package seccomp + +import "github.com/docker/engine-api/types" + +var ( + // DefaultProfile is a nil pointer on unsupported systems. + DefaultProfile *types.Seccomp +) diff --git a/vendor/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go new file mode 100644 index 00000000..cdc8e63a --- /dev/null +++ b/vendor/github.com/docker/docker/reference/reference.go @@ -0,0 +1,211 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/image/v1" +) + +const ( + // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified + DefaultTag = "latest" + // DefaultHostname is the default built-in hostname + DefaultHostname = "docker.io" + // LegacyDefaultHostname is automatically converted to DefaultHostname + LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for default repositories in default host + DefaultRepoPrefix = "library/" +) + +// Named is an object with a full name +type Named interface { + // Name returns normalized repository name, like "ubuntu". + Name() string + // String returns full reference, like "ubuntu@sha256:abcdef..." + String() string + // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" + FullName() string + // Hostname returns hostname for the reference, like "docker.io" + Hostname() string + // RemoteName returns the repository component of the full name, like "library/ubuntu" + RemoteName() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. 
+// If an error was encountered it is returned, along with a nil Reference. +func ParseNamed(s string) (Named, error) { + named, err := distreference.ParseNamed(s) + if err != nil { + return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) + } + r, err := WithName(named.Name()) + if err != nil { + return nil, err + } + if canonical, isCanonical := named.(distreference.Canonical); isCanonical { + return WithDigest(r, canonical.Digest()) + } + if tagged, isTagged := named.(distreference.NamedTagged); isTagged { + return WithTag(r, tagged.Tag()) + } + return r, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + name, err := normalize(name) + if err != nil { + return nil, err + } + if err := validateName(name); err != nil { + return nil, err + } + r, err := distreference.WithName(name) + if err != nil { + return nil, err + } + return &namedRef{r}, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + r, err := distreference.WithTag(name, tag) + if err != nil { + return nil, err + } + return &taggedRef{namedRef{r}}, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + r, err := distreference.WithDigest(name, digest) + if err != nil { + return nil, err + } + return &canonicalRef{namedRef{r}}, nil +} + +type namedRef struct { + distreference.Named +} +type taggedRef struct { + namedRef +} +type canonicalRef struct { + namedRef +} + +func (r *namedRef) FullName() string { + hostname, remoteName := splitHostname(r.Name()) + return hostname + "/" + remoteName +} +func (r *namedRef) Hostname() string { + hostname, _ := splitHostname(r.Name()) + return hostname +} +func (r *namedRef) RemoteName() string { + _, remoteName := splitHostname(r.Name()) + return remoteName +} +func (r *taggedRef) Tag() string { + return r.namedRef.Named.(distreference.NamedTagged).Tag() +} +func (r *canonicalRef) Digest() digest.Digest { + return r.namedRef.Named.(distreference.Canonical).Digest() +} + +// WithDefaultTag adds a default tag to a reference if it only has a repo name. +func WithDefaultTag(ref Named) Named { + if IsNameOnly(ref) { + ref, _ = WithTag(ref, DefaultTag) + } + return ref +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// ParseIDOrReference parses string for an image ID or a reference. ID can be +// without a default prefix. +func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { + if err := v1.ValidateID(idOrRef); err == nil { + idOrRef = "sha256:" + idOrRef + } + if dgst, err := digest.ParseDigest(idOrRef); err == nil { + return dgst, nil, nil + } + ref, err := ParseNamed(idOrRef) + return "", ref, err +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. Repository name +// needs to be already validated before.
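// Illustrative examples, not part of this patch, derived from the
// implementation below:
//
//	splitHostname("ubuntu")                    // "docker.io", "library/ubuntu"
//	splitHostname("user/repo")                 // "docker.io", "user/repo"
//	splitHostname("localhost/repo")            // "localhost", "repo"
//	splitHostname("reg.example.com:5000/repo") // "reg.example.com:5000", "repo"
//	splitHostname("index.docker.io/user/repo") // "docker.io", "user/repo"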
+func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + if hostname == LegacyDefaultHostname { + hostname = DefaultHostname + } + if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + return +} + +// normalize returns a repository name in its normalized form, meaning it +// will not contain default hostname nor library/ prefix for official images. +func normalize(name string) (string, error) { + host, remoteName := splitHostname(name) + if strings.ToLower(remoteName) != remoteName { + return "", errors.New("invalid reference format: repository name must be lowercase") + } + if host == DefaultHostname { + if strings.HasPrefix(remoteName, DefaultRepoPrefix) { + return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil + } + return remoteName, nil + } + return name, nil +} + +func validateName(name string) error { + if err := v1.ValidateID(name); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } + return nil +} diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go new file mode 100644 index 00000000..91c5c2ae --- /dev/null +++ b/vendor/github.com/docker/docker/reference/store.go @@ -0,0 +1,298 @@ +package reference + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" +) + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. + ErrDoesNotExist = errors.New("reference does not exist") +) + +// An Association is a tuple associating a reference with an image ID. +type Association struct { + Ref Named + ImageID image.ID +} + +// Store provides the set of methods which can operate on a tag store. +type Store interface { + References(id image.ID) []Named + ReferencesByName(ref Named) []Association + AddTag(ref Named, id image.ID, force bool) error + AddDigest(ref Canonical, id image.ID, force bool) error + Delete(ref Named) (bool, error) + Get(ref Named) (image.ID, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[image.ID]map[string]Named +} + +// Repository maps tags to image IDs. The key is a stringified Reference, +// including the repository name.
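// Illustrative note, not part of this patch: since only the exported
// Repositories field is marshalled, the JSON written to disk by save() below
// looks roughly like the following; referencesByIDCache is rebuilt from it by
// reload():
//
//	{
//	  "Repositories": {
//	    "ubuntu": {
//	      "ubuntu:latest": "sha256:<image ID>"
//	    }
//	  }
//	}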
+type repository map[string]image.ID + +type lexicalRefs []Named + +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } + +type lexicalAssociations []Association + +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } + +// NewReferenceStore creates a new reference store, tied to a file path where +// the set of references are serialized in JSON format. +func NewReferenceStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[image.ID]map[string]Named), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// AddTag adds a tag reference to the store. If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) AddTag(ref Named, id image.ID, force bool) error { + if _, isCanonical := ref.(Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + return store.addReference(WithDefaultTag(ref), id, force) +} + +// AddDigest adds a digest reference to the store. +func (store *store) AddDigest(ref Canonical, id image.ID, force bool) error { + return store.addReference(ref, id, force) +} + +func (store *store) addReference(ref Named, id image.ID, force bool) error { + if ref.Name() == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + repository = make(map[string]image.ID) + store.Repositories[ref.Name()] = repository + } + + refStr := ref.String() + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. 
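// Illustrative sketch, not part of this patch, of typical store usage; the
// JSON path and image ID are hypothetical:
//
//	refs, err := NewReferenceStore("/var/lib/docker/image/overlay/repositories.json")
//	if err != nil {
//		panic(err)
//	}
//	var imageID image.ID // e.g. loaded from the image store
//	named, _ := ParseNamed("myrepo/app:v1")
//	if err := refs.AddTag(named, imageID, false); err != nil {
//		// fails if the tag already exists and force is false
//	}
//	id, err := refs.Get(named) // resolves the tag back to its image.ID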
+func (store *store) Delete(ref Named) (bool, error) { + ref = WithDefaultTag(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repoName := ref.Name() + + repository, exists := store.Repositories[repoName] + if !exists { + return false, ErrDoesNotExist + } + + refStr := ref.String() + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, repoName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference. +func (store *store) Get(ref Named) (image.ID, error) { + ref = WithDefaultTag(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[ref.String()] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given image ID. The slice +// will be nil if there are no references to this image ID. +func (store *store) References(id image.ID) []Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable reference. + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + sort.Sort(lexicalRefs(references)) + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil.
+func (store *store) ReferencesByName(ref Named) []Association { + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ImageID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + + tempFilePath := store.jsonPath + ".tmp" + + if err := ioutil.WriteFile(tempFilePath, jsonData, 0600); err != nil { + return err + } + + if err := os.Rename(tempFilePath, store.jsonPath); err != nil { + return err + } + + return nil +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go index 57560935..c5663f58 100644 --- a/vendor/github.com/docker/docker/registry/auth.go +++ b/vendor/github.com/docker/docker/registry/auth.go @@ -1,230 +1,178 @@ package registry import ( - "encoding/json" "fmt" "io/ioutil" "net/http" + "net/url" "strings" + "time" "github.com/Sirupsen/logrus" - "github.com/docker/docker/cliconfig" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" ) -// Login tries to register/login to the registry server. -func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) { - // Separates the v2 registry login logic from the v1 logic. - if registryEndpoint.Version == APIVersion2 { - return loginV2(authConfig, registryEndpoint, "" /* scope */) - } - return loginV1(authConfig, registryEndpoint) -} +const ( + // AuthClientID is the ClientID used for the token server + AuthClientID = "docker" +) // loginV1 tries to register/login to the v1 registry server.
-func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) { - var ( - status string - reqBody []byte - err error - reqStatusCode = 0 - serverAddress = authConfig.ServerAddress - ) +func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { + registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) + if err != nil { + return "", "", err + } - logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint) + serverAddress := registryEndpoint.String() + + logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) if serverAddress == "" { - return "", fmt.Errorf("Server Error: Server Address not set.") + return "", "", fmt.Errorf("Server Error: Server Address not set.") } loginAgainstOfficialIndex := serverAddress == IndexServer - // to avoid sending the server address to the server it should be removed before being marshalled - authCopy := *authConfig - authCopy.ServerAddress = "" - - jsonBody, err := json.Marshal(authCopy) + req, err := http.NewRequest("GET", serverAddress+"users/", nil) if err != nil { - return "", fmt.Errorf("Config Error: %s", err) + return "", "", err } - - // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. - b := strings.NewReader(string(jsonBody)) - req1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b) - if err != nil { - return "", fmt.Errorf("Server Error: %s", err) - } - reqStatusCode = req1.StatusCode - defer req1.Body.Close() - reqBody, err = ioutil.ReadAll(req1.Body) - if err != nil { - return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) - } - - if reqStatusCode == 201 { - if loginAgainstOfficialIndex { - status = "Account created. Please use the confirmation link we sent" + - " to your e-mail to activate it." - } else { - // *TODO: Use registry configuration to determine what this says, if anything? - status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." - } - } else if reqStatusCode == 400 { - if string(reqBody) == "\"Username or email already exists\"" { - req, err := http.NewRequest("GET", serverAddress+"users/", nil) - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := registryEndpoint.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode == 200 { - return "Login Succeeded", nil - } else if resp.StatusCode == 401 { - return "", fmt.Errorf("Wrong login/password, please try again") - } else if resp.StatusCode == 403 { - if loginAgainstOfficialIndex { - return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") - } - // *TODO: Use registry configuration to determine what this says, if anything? - return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else if resp.StatusCode == 500 { // Issue #14326 - logrus.Errorf("%s returned status code %d. 
Response Body :\n%s", req.URL.String(), resp.StatusCode, body) - return "", fmt.Errorf("Internal Server Error") - } - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) - } - return "", fmt.Errorf("Registration: %s", reqBody) - - } else if reqStatusCode == 401 { - // This case would happen with private registries where /v1/users is - // protected, so people can use `docker login` as an auth check. - req, err := http.NewRequest("GET", serverAddress+"users/", nil) - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := registryEndpoint.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode == 200 { - return "Login Succeeded", nil - } else if resp.StatusCode == 401 { - return "", fmt.Errorf("Wrong login/password, please try again") - } else { - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header) - } - } else { - return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody) - } - return status, nil -} - -// loginV2 tries to login to the v2 registry server. The given registry endpoint has been -// pinged or setup with a list of authorization challenges. Each of these challenges are -// tried until one of them succeeds. Currently supported challenge schemes are: -// HTTP Basic Authorization -// Token Authorization with a separate token issuing server -// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For -// now, users should create their account through other means like directly from a web page -// served by the v2 registry service provider. Whether this will be supported in the future -// is to be determined. -func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, scope string) (string, error) { - logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint) - var ( - err error - allErrors []error - ) - - for _, challenge := range registryEndpoint.AuthChallenges { - params := make(map[string]string, len(challenge.Parameters)+1) - for k, v := range challenge.Parameters { - params[k] = v - } - params["scope"] = scope - logrus.Debugf("trying %q auth challenge with params %v", challenge.Scheme, params) - - switch strings.ToLower(challenge.Scheme) { - case "basic": - err = tryV2BasicAuthLogin(authConfig, params, registryEndpoint) - case "bearer": - err = tryV2TokenAuthLogin(authConfig, params, registryEndpoint) - default: - // Unsupported challenge types are explicitly skipped. 
- err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme) - } - - if err == nil { - return "Login Succeeded", nil - } - - logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err) - - allErrors = append(allErrors, err) - } - - return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors) -} - -func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { - req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) - if err != nil { - return err - } - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := registryEndpoint.client.Do(req) if err != nil { - return err + // fallback when request could not be completed + return "", "", fallbackError{ + err: err, + } } defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", "", err } - - return nil + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", "", nil + } else if resp.StatusCode == http.StatusUnauthorized { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") + } + return "", "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == http.StatusForbidden { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") + } + // *TODO: Use registry configuration to determine what this says, if anything? + return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 + logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) + return "", "", fmt.Errorf("Internal Server Error") + } + return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) } -func tryV2TokenAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { - token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint) +type loginCredentialStore struct { + authConfig *types.AuthConfig +} + +func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { + return lcs.authConfig.Username, lcs.authConfig.Password +} + +func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { + return lcs.authConfig.IdentityToken +} + +func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { + lcs.authConfig.IdentityToken = token +} + +type fallbackError struct { + err error +} + +func (err fallbackError) Error() string { + return err.err.Error() +} + +// loginV2 tries to login to the v2 registry server. The given registry +// endpoint will be pinged to get authorization challenges. These challenges +// will be used to authenticate against the registry to validate credentials. 
+func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { + logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") + + modifiers := DockerHeaders(userAgent, nil) + authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) + + challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) if err != nil { - return err + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err } - req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) - if err != nil { - return err + credentialAuthConfig := *authConfig + creds := loginCredentialStore{ + authConfig: &credentialAuthConfig, } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + OfflineAccess: true, + ClientID: AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + tr := transport.NewTransport(authTransport, modifiers...) - resp, err := registryEndpoint.client.Do(req) + loginClient := &http.Client{ + Transport: tr, + Timeout: 15 * time.Second, + } + + endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) if err != nil { - return err + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + + resp, err := loginClient.Do(req) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) + // TODO(dmcgowan): Attempt to further interpret result, status code and error code string + err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err } - return nil + return "Login Succeeded", credentialAuthConfig.IdentityToken, nil + } // ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(config *cliconfig.ConfigFile, index *IndexInfo) cliconfig.AuthConfig { - configKey := index.GetAuthConfigKey() +func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := GetAuthConfigKey(index) // First try the happy case - if c, found := config.AuthConfigs[configKey]; found || index.Official { + if c, found := authConfigs[configKey]; found || index.Official { return c } @@ -243,12 +191,72 @@ func ResolveAuthConfig(config *cliconfig.ConfigFile, index *IndexInfo) cliconfig // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing - for registry, ac := range config.AuthConfigs { + for registry, ac := range authConfigs { if configKey == convertToHostname(registry) { return ac } } // When all else fails, return an empty auth config - return cliconfig.AuthConfig{} + return types.AuthConfig{} +} + +// PingResponseError is used when the response from a ping +// was received but invalid. 
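// ----- illustrative sketch (editorial aside, not vendored code) -----
// ResolveAuthConfig above picks the credentials entry whose key matches the
// index: the official index is keyed by the full IndexServer URL, private
// registries by bare hostname[:port], with a fallback that strips schemes
// off legacy config keys. A minimal standalone sketch of that lookup;
// resolve, stripScheme, and the map values are hypothetical:
package main

import (
	"fmt"
	"strings"
)

// stripScheme mimics the legacy-key conversion: drop any scheme and path.
func stripScheme(key string) string {
	key = strings.TrimPrefix(key, "https://")
	key = strings.TrimPrefix(key, "http://")
	return strings.SplitN(key, "/", 2)[0]
}

func resolve(creds map[string]string, configKey string) string {
	// Happy case: exact key match.
	if c, ok := creds[configKey]; ok {
		return c
	}
	// Legacy fallback: keys that still carry a scheme or path.
	for key, c := range creds {
		if configKey == stripScheme(key) {
			return c
		}
	}
	return "" // empty auth config when nothing matches
}

func main() {
	creds := map[string]string{"https://myregistry.example.com/v1/": "token-a"}
	fmt.Println(resolve(creds, "myregistry.example.com")) // token-a via fallback
}
// ----- end sketch -----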
+type PingResponseError struct { + Err error +} + +func (err PingResponseError) Error() string { + return err.Err.Error() +} + +// PingV2Registry attempts to ping a v2 registry and on success return a +// challenge manager for the supported authentication types and +// whether v2 was confirmed by the response. If a response is received but +// cannot be interpreted a PingResponseError will be returned. +func PingV2Registry(endpoint APIEndpoint, transport http.RoundTripper) (auth.ChallengeManager, bool, error) { + var ( + foundV2 = false + v2Version = auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + ) + + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, false, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. + + foundV2 = true + break + } + } + + challengeManager := auth.NewSimpleChallengeManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, foundV2, PingResponseError{ + Err: err, + } + } + + return challengeManager, foundV2, nil } diff --git a/vendor/github.com/docker/docker/registry/auth_test.go b/vendor/github.com/docker/docker/registry/auth_test.go deleted file mode 100644 index a8e3da01..00000000 --- a/vendor/github.com/docker/docker/registry/auth_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package registry - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/docker/cliconfig" -) - -func TestEncodeAuth(t *testing.T) { - newAuthConfig := &cliconfig.AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} - authStr := cliconfig.EncodeAuth(newAuthConfig) - decAuthConfig := &cliconfig.AuthConfig{} - var err error - decAuthConfig.Username, decAuthConfig.Password, err = cliconfig.DecodeAuth(authStr) - if err != nil { - t.Fatal(err) - } - if newAuthConfig.Username != decAuthConfig.Username { - t.Fatal("Encode Username doesn't match decoded Username") - } - if newAuthConfig.Password != decAuthConfig.Password { - t.Fatal("Encode Password doesn't match decoded Password") - } - if authStr != "a2VuOnRlc3Q=" { - t.Fatal("AuthString encoding isn't correct.") - } -} - -func setupTempConfigFile() (*cliconfig.ConfigFile, error) { - root, err := ioutil.TempDir("", "docker-test-auth") - if err != nil { - return nil, err - } - root = filepath.Join(root, cliconfig.ConfigFileName) - configFile := cliconfig.NewConfigFile(root) - - for _, registry := range []string{"testIndex", IndexServer} { - configFile.AuthConfigs[registry] = cliconfig.AuthConfig{ - Username: "docker-user", - Password: "docker-pass", - Email: "docker@docker.io", - } - } - - return configFile, nil -} - -func TestSameAuthDataPostSave(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.Filename()) - - err = configFile.Save() - if err != nil { - t.Fatal(err) - } - - authConfig := configFile.AuthConfigs["testIndex"] - if authConfig.Username != "docker-user" { - t.Fail() - } - if authConfig.Password != "docker-pass" { - t.Fail() - } - if authConfig.Email
!= "docker@docker.io" { - t.Fail() - } - if authConfig.Auth != "" { - t.Fail() - } -} - -func TestResolveAuthConfigIndexServer(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.Filename()) - - indexConfig := configFile.AuthConfigs[IndexServer] - - officialIndex := &IndexInfo{ - Official: true, - } - privateIndex := &IndexInfo{ - Official: false, - } - - resolved := ResolveAuthConfig(configFile, officialIndex) - assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") - - resolved = ResolveAuthConfig(configFile, privateIndex) - assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") -} - -func TestResolveAuthConfigFullURL(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.Filename()) - - registryAuth := cliconfig.AuthConfig{ - Username: "foo-user", - Password: "foo-pass", - Email: "foo@example.com", - } - localAuth := cliconfig.AuthConfig{ - Username: "bar-user", - Password: "bar-pass", - Email: "bar@example.com", - } - officialAuth := cliconfig.AuthConfig{ - Username: "baz-user", - Password: "baz-pass", - Email: "baz@example.com", - } - configFile.AuthConfigs[IndexServer] = officialAuth - - expectedAuths := map[string]cliconfig.AuthConfig{ - "registry.example.com": registryAuth, - "localhost:8000": localAuth, - "registry.com": localAuth, - } - - validRegistries := map[string][]string{ - "registry.example.com": { - "https://registry.example.com/v1/", - "http://registry.example.com/v1/", - "registry.example.com", - "registry.example.com/v1/", - }, - "localhost:8000": { - "https://localhost:8000/v1/", - "http://localhost:8000/v1/", - "localhost:8000", - "localhost:8000/v1/", - }, - "registry.com": { - "https://registry.com/v1/", - "http://registry.com/v1/", - "registry.com", - "registry.com/v1/", - }, - } - - for configKey, registries := range validRegistries { - configured, ok := expectedAuths[configKey] - if !ok || configured.Email == "" { - t.Fail() - } - index := &IndexInfo{ - Name: configKey, - } - for _, registry := range registries { - configFile.AuthConfigs[registry] = configured - resolved := ResolveAuthConfig(configFile, index) - if resolved.Email != configured.Email { - t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) - } - delete(configFile.AuthConfigs, registry) - resolved = ResolveAuthConfig(configFile, index) - if resolved.Email == configured.Email { - t.Errorf("%s -> %q == %q\n", registry, resolved.Email, configured.Email) - } - } - } -} diff --git a/vendor/github.com/docker/docker/registry/authchallenge.go b/vendor/github.com/docker/docker/registry/authchallenge.go deleted file mode 100644 index e300d82a..00000000 --- a/vendor/github.com/docker/docker/registry/authchallenge.go +++ /dev/null @@ -1,150 +0,0 @@ -package registry - -import ( - "net/http" - "strings" -) - -// Octet types from RFC 2616. -type octetType byte - -// AuthorizationChallenge carries information -// from a WWW-Authenticate response header. 
-type AuthorizationChallenge struct { - Scheme string - Parameters map[string]string -} - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -func parseAuthHeader(header http.Header) []*AuthorizationChallenge { - var challenges []*AuthorizationChallenge - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + i; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index e8f2287e..50061329 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -1,26 +1,35 @@ package registry import ( - "encoding/json" "errors" "fmt" "net" "net/url" "strings" - "github.com/docker/distribution/reference" - "github.com/docker/docker/image" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + registrytypes "github.com/docker/engine-api/types/registry" ) -// Options holds command line options. -type Options struct { - Mirrors opts.ListOpts - InsecureRegistries opts.ListOpts +// ServiceOptions holds command line options. 
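// ----- illustrative sketch (editorial aside, not vendored code) -----
// The json tags on the new ServiceOptions below mirror the daemon
// configuration file keys. A small sketch of decoding such a config; the
// struct here is a local copy for illustration, not the vendored type:
package main

import (
	"encoding/json"
	"fmt"
)

type serviceOptions struct {
	Mirrors            []string `json:"registry-mirrors,omitempty"`
	InsecureRegistries []string `json:"insecure-registries,omitempty"`
	V2Only             bool     `json:"disable-legacy-registry,omitempty"`
}

func main() {
	raw := `{
		"registry-mirrors": ["https://mirror.example.com"],
		"insecure-registries": ["myregistry.local:5000", "10.0.0.0/8"],
		"disable-legacy-registry": true
	}`
	var opts serviceOptions
	if err := json.Unmarshal([]byte(raw), &opts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts)
}
// ----- end sketch -----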
+type ServiceOptions struct { + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only bool `json:"disable-legacy-registry,omitempty"` } -const ( +// serviceConfig holds daemon configuration for the registry service. +type serviceConfig struct { + registrytypes.ServiceConfig + V2Only bool +} + +var ( // DefaultNamespace is the default namespace DefaultNamespace = "docker.io" // DefaultRegistryVersionHeader is the name of the default HTTP header @@ -28,14 +37,24 @@ const ( DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" // IndexServer is the v1 registry server used for user auth + account creation - IndexServer = DefaultV1Registry + "/v1/" + IndexServer = DefaultV1Registry.String() + "/v1/" // IndexName is the name of the index IndexName = "docker.io" // NotaryServer is the endpoint serving the Notary trust server NotaryServer = "https://notary.docker.io" - // IndexServer = "https://registry-stage.hub.docker.com/v1/" + // DefaultV1Registry is the URI of the default v1 registry + DefaultV1Registry = &url.URL{ + Scheme: "https", + Host: "index.docker.io", + } + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: "registry-1.docker.io", + } ) var ( @@ -43,80 +62,53 @@ var ( // not have the correct form ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - emptyServiceConfig = NewServiceConfig(nil) - - // V2Only controls access to legacy registries. If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only = false + emptyServiceConfig = newServiceConfig(ServiceOptions{}) ) -// InstallFlags adds command-line options to the top-level flag parser for +// for mocking in unit tests +var lookupIP = net.LookupIP + +// InstallCliFlags adds command-line options to the top-level flag parser for // the current process. 
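// ----- illustrative sketch (editorial aside, not vendored code) -----
// InstallCliFlags below wires repeatable flags (--registry-mirror,
// --insecure-registry) into the ServiceOptions slices via docker's mflag and
// opts helpers. The same pattern using only the standard library flag
// package as a stand-in (listFlag is a hypothetical helper):
package main

import (
	"flag"
	"fmt"
	"strings"
)

// listFlag collects repeated occurrences of a flag into a slice.
type listFlag []string

func (l *listFlag) String() string     { return strings.Join(*l, ",") }
func (l *listFlag) Set(v string) error { *l = append(*l, v); return nil }

func main() {
	var mirrors, insecure listFlag
	v2Only := flag.Bool("disable-legacy-registry", false, "Do not contact legacy registries")
	flag.Var(&mirrors, "registry-mirror", "Preferred Docker registry mirror")
	flag.Var(&insecure, "insecure-registry", "Enable insecure registry communication")
	flag.Parse()
	fmt.Println(mirrors, insecure, *v2Only)
}
// ----- end sketch -----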
-func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - options.Mirrors = opts.NewListOpts(ValidateMirror) - cmd.Var(&options.Mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) - options.InsecureRegistries = opts.NewListOpts(ValidateIndexName) - cmd.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) - cmd.BoolVar(&V2Only, []string{"-disable-legacy-registry"}, false, "Do not contact legacy registries") +func (options *ServiceOptions) InstallCliFlags(cmd *flag.FlagSet, usageFn func(string) string) { + mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) + cmd.Var(mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) + + insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) + cmd.Var(insecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) + + cmd.BoolVar(&options.V2Only, []string{"-disable-legacy-registry"}, false, usageFn("Do not contact legacy registries")) } -type netIPNet net.IPNet - -func (ipnet *netIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = netIPNet(*cidr) - } - } - return -} - -// ServiceConfig stores daemon registry services configuration. -type ServiceConfig struct { - InsecureRegistryCIDRs []*netIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NewServiceConfig returns a new instance of ServiceConfig -func NewServiceConfig(options *Options) *ServiceConfig { - if options == nil { - options = &Options{ - Mirrors: opts.NewListOpts(nil), - InsecureRegistries: opts.NewListOpts(nil), - } - } - +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) *serviceConfig { // Localhost is by default considered as an insecure registry // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). // // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change // daemon flags on boot2docker? - options.InsecureRegistries.Set("127.0.0.0/8") + options.InsecureRegistries = append(options.InsecureRegistries, "127.0.0.0/8") - config := &ServiceConfig{ - InsecureRegistryCIDRs: make([]*netIPNet, 0), - IndexConfigs: make(map[string]*IndexInfo, 0), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - Mirrors: options.Mirrors.GetAll(), + config := &serviceConfig{ + ServiceConfig: registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + Mirrors: options.Mirrors, + }, + V2Only: options.V2Only, } // Split --insecure-registry into CIDR and registry-specific settings. 
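// ----- illustrative sketch (editorial aside, not vendored code) -----
// The split below hinges on net.ParseCIDR: values that parse as CIDR blocks
// become insecure subnets, everything else is treated as a host[:port] index
// entry. A standalone sketch of that classification (classify is a
// hypothetical name):
package main

import (
	"fmt"
	"net"
)

func classify(entries []string) (cidrs []*net.IPNet, hosts []string) {
	for _, r := range entries {
		if _, ipnet, err := net.ParseCIDR(r); err == nil {
			cidrs = append(cidrs, ipnet) // valid CIDR, e.g. "127.0.0.0/8"
		} else {
			hosts = append(hosts, r) // assume host:port, e.g. "myregistry:5000"
		}
	}
	return cidrs, hosts
}

func main() {
	cidrs, hosts := classify([]string{"127.0.0.0/8", "myregistry:5000"})
	fmt.Println(cidrs, hosts) // [127.0.0.0/8] [myregistry:5000]
}
// ----- end sketch -----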
- for _, r := range options.InsecureRegistries.GetAll() { + for _, r := range options.InsecureRegistries { // Check if CIDR was passed to --insecure-registry _, ipnet, err := net.ParseCIDR(r) if err == nil { // Valid CIDR. - config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*netIPNet)(ipnet)) + config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*registrytypes.NetIPNet)(ipnet)) } else { // Assume `host:port` if not CIDR. - config.IndexConfigs[r] = &IndexInfo{ + config.IndexConfigs[r] = ®istrytypes.IndexInfo{ Name: r, Mirrors: make([]string, 0), Secure: false, @@ -126,7 +118,7 @@ func NewServiceConfig(options *Options) *ServiceConfig { } // Configure public registry. - config.IndexConfigs[IndexName] = &IndexInfo{ + config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ Name: IndexName, Mirrors: config.Mirrors, Secure: true, @@ -147,9 +139,9 @@ func NewServiceConfig(options *Options) *ServiceConfig { // or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained // in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element // of insecureRegistries. -func (config *ServiceConfig) isSecureIndex(indexName string) bool { +func isSecureIndex(config *serviceConfig, indexName string) bool { // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides NewIndexInfo, in order to honor per-index configurations. + // is called from anything besides newIndexInfo, in order to honor per-index configurations. if index, ok := config.IndexConfigs[indexName]; ok { return index.Secure } @@ -205,31 +197,15 @@ func ValidateMirror(val string) (string, error) { // ValidateIndexName validates an index name. func ValidateIndexName(val string) (string, error) { - // 'index.docker.io' => 'docker.io' - if val == "index."+IndexName { - val = IndexName + if val == reference.LegacyDefaultHostname { + val = reference.DefaultHostname } if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) } - // *TODO: Check if valid hostname[:port]/ip[:port]? return val, nil } -func validateRemoteName(remoteName string) error { - - if !strings.Contains(remoteName, "/") { - - // the repository name must not be a valid image ID - if err := image.ValidateID(remoteName); err == nil { - return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", remoteName) - } - } - - _, err := reference.WithName(remoteName) - return err -} - func validateNoSchema(reposName string) error { if strings.Contains(reposName, "://") { // It cannot contain a scheme! 
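// ----- illustrative sketch (editorial aside, not vendored code) -----
// ValidateIndexName above normalizes the legacy "index.docker.io" spelling
// to "docker.io" and rejects names with a leading or trailing hyphen. A
// standalone sketch of the same rules, with the hostname constants inlined:
package main

import (
	"fmt"
	"strings"
)

func validateIndexName(val string) (string, error) {
	if val == "index.docker.io" {
		val = "docker.io"
	}
	if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
		return "", fmt.Errorf("invalid index name (%s): cannot begin or end with a hyphen", val)
	}
	return val, nil
}

func main() {
	fmt.Println(validateIndexName("index.docker.io")) // docker.io <nil>
	fmt.Println(validateIndexName("-bad-"))           // error
}
// ----- end sketch -----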
@@ -238,21 +214,8 @@ func validateNoSchema(reposName string) error { return nil } -// ValidateRepositoryName validates a repository name -func ValidateRepositoryName(reposName string) error { - var err error - if err = validateNoSchema(reposName); err != nil { - return err - } - indexName, remoteName := splitReposName(reposName) - if _, err = ValidateIndexName(indexName); err != nil { - return err - } - return validateRemoteName(remoteName) -} - -// NewIndexInfo returns IndexInfo configuration from indexName -func (config *ServiceConfig) NewIndexInfo(indexName string) (*IndexInfo, error) { +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { var err error indexName, err = ValidateIndexName(indexName) if err != nil { @@ -265,124 +228,47 @@ func (config *ServiceConfig) NewIndexInfo(indexName string) (*IndexInfo, error) } // Construct a non-configured index info. - index := &IndexInfo{ + index := ®istrytypes.IndexInfo{ Name: indexName, Mirrors: make([]string, 0), Official: false, } - index.Secure = config.isSecureIndex(indexName) + index.Secure = isSecureIndex(config, indexName) return index, nil } // GetAuthConfigKey special-cases using the full index address of the official // index as the AuthConfig key, and uses the (host)name[:port] for private indexes. -func (index *IndexInfo) GetAuthConfigKey() string { +func GetAuthConfigKey(index *registrytypes.IndexInfo) string { if index.Official { return IndexServer } return index.Name } -// splitReposName breaks a reposName into an index name and remote name -func splitReposName(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - var indexName, remoteName string - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Index repos (ex: samalba/hipache or ubuntu) - // 'docker.io' - indexName = IndexName - remoteName = reposName - } else { - indexName = nameParts[0] - remoteName = nameParts[1] - } - return indexName, remoteName -} - -// NewRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func (config *ServiceConfig) NewRepositoryInfo(reposName string, bySearch bool) (*RepositoryInfo, error) { - if err := validateNoSchema(reposName); err != nil { - return nil, err - } - - indexName, remoteName := splitReposName(reposName) - - if !bySearch { - if err := validateRemoteName(remoteName); err != nil { - return nil, err - } - } - - repoInfo := &RepositoryInfo{ - RemoteName: remoteName, - } - - var err error - repoInfo.Index, err = config.NewIndexInfo(indexName) +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, name.Hostname()) if err != nil { return nil, err } - - if repoInfo.Index.Official { - normalizedName := repoInfo.RemoteName - if strings.HasPrefix(normalizedName, "library/") { - // If pull "library/foo", it's stored locally under "foo" - normalizedName = strings.SplitN(normalizedName, "/", 2)[1] - } - - repoInfo.LocalName = normalizedName - repoInfo.RemoteName = normalizedName - // If the normalized name does not contain a '/' (e.g. "foo") - // then it is an official repo. - if strings.IndexRune(normalizedName, '/') == -1 { - repoInfo.Official = true - // Fix up remote name for official repos. 
- repoInfo.RemoteName = "library/" + normalizedName - } - - repoInfo.CanonicalName = "docker.io/" + repoInfo.RemoteName - } else { - repoInfo.LocalName = repoInfo.Index.Name + "/" + repoInfo.RemoteName - repoInfo.CanonicalName = repoInfo.LocalName - - } - - return repoInfo, nil -} - -// GetSearchTerm special-cases using local name for official index, and -// remote name for private indexes. -func (repoInfo *RepositoryInfo) GetSearchTerm() string { - if repoInfo.Index.Official { - return repoInfo.LocalName - } - return repoInfo.RemoteName + official := !strings.ContainsRune(name.Name(), '/') + return &RepositoryInfo{name, index, official}, nil } // ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but // lacks registry configuration. -func ParseRepositoryInfo(reposName string) (*RepositoryInfo, error) { - return emptyServiceConfig.NewRepositoryInfo(reposName, false) +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) } -// ParseIndexInfo will use repository name to get back an indexInfo. -func ParseIndexInfo(reposName string) (*IndexInfo, error) { - indexName, _ := splitReposName(reposName) +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) - indexInfo, err := emptyServiceConfig.NewIndexInfo(indexName) + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) if err != nil { return nil, err } return indexInfo, nil } - -// NormalizeLocalName transforms a repository name into a normalize LocalName -// Passes through the name without transformation on error (image id, etc) -func NormalizeLocalName(name string) string { - repoInfo, err := ParseRepositoryInfo(name) - if err != nil { - return name - } - return repoInfo.LocalName -} diff --git a/vendor/github.com/docker/docker/registry/config_test.go b/vendor/github.com/docker/docker/registry/config_test.go deleted file mode 100644 index 25578a7f..00000000 --- a/vendor/github.com/docker/docker/registry/config_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package registry - -import ( - "testing" -) - -func TestValidateMirror(t *testing.T) { - valid := []string{ - "http://mirror-1.com", - "https://mirror-1.com", - "http://localhost", - "https://localhost", - "http://localhost:5000", - "https://localhost:5000", - "http://127.0.0.1", - "https://127.0.0.1", - "http://127.0.0.1:5000", - "https://127.0.0.1:5000", - } - - invalid := []string{ - "!invalid!://%as%", - "ftp://mirror-1.com", - "http://mirror-1.com/", - "http://mirror-1.com/?q=foo", - "http://mirror-1.com/v1/", - "http://mirror-1.com/v1/?q=foo", - "http://mirror-1.com/v1/?q=foo#frag", - "http://mirror-1.com?q=foo", - "https://mirror-1.com#frag", - "https://mirror-1.com/", - "https://mirror-1.com/#frag", - "https://mirror-1.com/v1/", - "https://mirror-1.com/v1/#", - "https://mirror-1.com?q", - } - - for _, address := range valid { - if ret, err := ValidateMirror(address); err != nil || ret == "" { - t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) - } - } - - for _, address := range invalid { - if ret, err := ValidateMirror(address); err == nil || ret != "" { - t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) - } - } -} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go index 32f167d0..b81d2493 100644 --- 
a/vendor/github.com/docker/docker/registry/config_unix.go +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -2,13 +2,7 @@ package registry -const ( - // DefaultV1Registry is the URI of the default v1 registry - DefaultV1Registry = "https://index.docker.io" - - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = "https://registry-1.docker.io" - +var ( // CertsDir is the directory where certificates are stored CertsDir = "/etc/docker/certs.d" ) diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index d01b2618..00000000 --- a/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package registry - -import ( - "os" - "path/filepath" - "strings" -) - -const ( - // DefaultV1Registry is the URI of the default v1 registry - DefaultV1Registry = "https://registry-win-tp3.docker.io" - - // DefaultV2Registry is the URI of the default (official) v2 registry. - // This is the windows-specific endpoint. - // - // Currently it is a TEMPORARY link that allows Microsoft to continue - // development of Docker Engine for Windows. - DefaultV2Registry = "https://registry-win-tp3.docker.io" -) - -// CertsDir is the directory where certificates are stored -var CertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} diff --git a/vendor/github.com/docker/docker/registry/endpoint.go b/vendor/github.com/docker/docker/registry/endpoint.go deleted file mode 100644 index 20805767..00000000 --- a/vendor/github.com/docker/docker/registry/endpoint.go +++ /dev/null @@ -1,276 +0,0 @@ -package registry - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" -) - -// for mocking in unit tests -var lookupIP = net.LookupIP - -// scans string for api version in the URL path. returns the trimmed address, if version found, string and API version. -func scanForAPIVersion(address string) (string, APIVersion) { - var ( - chunks []string - apiVersionStr string - ) - - if strings.HasSuffix(address, "/") { - address = address[:len(address)-1] - } - - chunks = strings.Split(address, "/") - apiVersionStr = chunks[len(chunks)-1] - - for k, v := range apiVersions { - if apiVersionStr == v { - address = strings.Join(chunks[:len(chunks)-1], "/") - return address, k - } - } - - return address, APIVersionUnknown -} - -// NewEndpoint parses the given address to return a registry endpoint. 
v can be used to -// specify a specific endpoint version -func NewEndpoint(index *IndexInfo, metaHeaders http.Header, v APIVersion) (*Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - endpoint, err := newEndpoint(index.GetAuthConfigKey(), tlsConfig, metaHeaders) - if err != nil { - return nil, err - } - if v != APIVersionUnknown { - endpoint.Version = v - } - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) (*Endpoint, error) { - var ( - endpoint = new(Endpoint) - trimmedAddress string - err error - ) - - if !strings.HasPrefix(address, "http") { - address = "https://" + address - } - - endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify) - - trimmedAddress, endpoint.Version = scanForAPIVersion(address) - - if endpoint.URL, err = url.Parse(trimmedAddress); err != nil { - return nil, err - } - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...)) - return endpoint, nil -} - -// Endpoint stores basic information about a registry endpoint. -type Endpoint struct { - client *http.Client - URL *url.URL - Version APIVersion - IsSecure bool - AuthChallenges []*AuthorizationChallenge - URLBuilder *v2.URLBuilder -} - -// Get the formated URL for the root of this registry Endpoint -func (e *Endpoint) String() string { - return fmt.Sprintf("%s/v%d/", e.URL, e.Version) -} - -// VersionString returns a formatted string of this -// endpoint address using the given API Version. -func (e *Endpoint) VersionString(version APIVersion) string { - return fmt.Sprintf("%s/v%d/", e.URL, version) -} - -// Path returns a formatted string for the URL -// of this endpoint with the given path appended. -func (e *Endpoint) Path(path string) string { - return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path) -} - -// Ping pings the remote endpoint with v2 and v1 pings to determine the API -// version. It returns a PingResult containing the discovered version. 
The -// PingResult also indicates whether the registry is standalone or not. -func (e *Endpoint) Ping() (PingResult, error) { - // The ping logic to use is determined by the registry endpoint version. - switch e.Version { - case APIVersion1: - return e.pingV1() - case APIVersion2: - return e.pingV2() - } - - // APIVersionUnknown - // We should try v2 first... - e.Version = APIVersion2 - regInfo, errV2 := e.pingV2() - if errV2 == nil { - return regInfo, nil - } - - // ... then fallback to v1. - e.Version = APIVersion1 - regInfo, errV1 := e.pingV1() - if errV1 == nil { - return regInfo, nil - } - - e.Version = APIVersionUnknown - return PingResult{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1) -} - -func (e *Endpoint) pingV1() (PingResult, error) { - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return PingResult{Standalone: false}, nil - } - - req, err := http.NewRequest("GET", e.Path("_ping"), nil) - if err != nil { - return PingResult{Standalone: false}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{Standalone: false}, err - } - - defer resp.Body.Close() - - jsonString, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - logrus.Debugf("Registry version header: '%s'", hdr) - info.Version = hdr - } - logrus.Debugf("PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - logrus.Debugf("Registry standalone header: '%s'", standalone) - // Accepted values are "true" (case-insensitive) and "1". - if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} - -func (e *Endpoint) pingV2() (PingResult, error) { - logrus.Debugf("attempting v2 ping for registry endpoint %s", e) - - req, err := http.NewRequest("GET", e.Path(""), nil) - if err != nil { - return PingResult{}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{}, err - } - defer resp.Body.Close() - - // The endpoint may have multiple supported versions. - // Ensure it supports the v2 Registry API. 
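// ----- illustrative sketch (editorial aside, not vendored code) -----
// Both the old pingV2 here and the new PingV2Registry key off the
// Docker-Distribution-API-Version header: any space-separated field equal to
// "registry/2.0" marks the endpoint as a confirmed v2 registry. A standalone
// sketch of that header scan:
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func supportsV2(h http.Header) bool {
	for _, versions := range h[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] {
		for _, v := range strings.Fields(versions) {
			if v == "registry/2.0" {
				return true
			}
		}
	}
	return false
}

func main() {
	h := http.Header{}
	h.Add("Docker-Distribution-API-Version", "registry/100.0 registry/42.0")
	h.Add("Docker-Distribution-API-Version", "registry/2.0 registry/2.1")
	fmt.Println(supportsV2(h)) // true
}
// ----- end sketch -----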
- var supportsV2 bool - -HeaderLoop: - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { - for _, versionName := range strings.Fields(supportedVersions) { - if versionName == "registry/2.0" { - supportsV2 = true - break HeaderLoop - } - } - } - - if !supportsV2 { - return PingResult{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e) - } - - if resp.StatusCode == http.StatusOK { - // It would seem that no authentication/authorization is required. - // So we don't need to parse/add any authorization schemes. - return PingResult{Standalone: true}, nil - } - - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. - e.AuthChallenges = parseAuthHeader(resp.Header) - return PingResult{}, nil - } - - return PingResult{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode)) -} diff --git a/vendor/github.com/docker/docker/registry/endpoint_test.go b/vendor/github.com/docker/docker/registry/endpoint_test.go deleted file mode 100644 index ee301dbd..00000000 --- a/vendor/github.com/docker/docker/registry/endpoint_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package registry - -import ( - "net/http" - "net/http/httptest" - "net/url" - "testing" -) - -func TestEndpointParse(t *testing.T) { - testData := []struct { - str string - expected string - }{ - {IndexServer, IndexServer}, - {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, - {"http://0.0.0.0:5000/v2/", "http://0.0.0.0:5000/v2/"}, - {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v0/"}, - {"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"}, - } - for _, td := range testData { - e, err := newEndpoint(td.str, nil, nil) - if err != nil { - t.Errorf("%q: %s", td.str, err) - } - if e == nil { - t.Logf("something's fishy, endpoint for %q is nil", td.str) - continue - } - if e.String() != td.expected { - t.Errorf("expected %q, got %q", td.expected, e.String()) - } - } -} - -// Ensure that a registry endpoint that responds with a 401 only is determined -// to be a v1 registry unless it includes a valid v2 API header. -func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) { - requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) - w.WriteHeader(http.StatusUnauthorized) - }) - - requireBasicAuthHandlerV2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // This mock server supports v2.0, v2.1, v42.0, and v100.0 - w.Header().Add("Docker-Distribution-API-Version", "registry/100.0 registry/42.0") - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0 registry/2.1") - requireBasicAuthHandler.ServeHTTP(w, r) - }) - - // Make a test server which should validate as a v1 server. - testServer := httptest.NewServer(requireBasicAuthHandler) - defer testServer.Close() - - testServerURL, err := url.Parse(testServer.URL) - if err != nil { - t.Fatal(err) - } - - testEndpoint := Endpoint{ - URL: testServerURL, - Version: APIVersionUnknown, - client: HTTPClient(NewTransport(nil)), - } - - if err = validateEndpoint(&testEndpoint); err != nil { - t.Fatal(err) - } - - if testEndpoint.Version != APIVersion1 { - t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion1, testEndpoint.Version) - } - - // Make a test server which should validate as a v2 server. 
- testServer = httptest.NewServer(requireBasicAuthHandlerV2) - defer testServer.Close() - - testServerURL, err = url.Parse(testServer.URL) - if err != nil { - t.Fatal(err) - } - - testEndpoint.URL = testServerURL - testEndpoint.Version = APIVersionUnknown - - if err = validateEndpoint(&testEndpoint); err != nil { - t.Fatal(err) - } - - if testEndpoint.Version != APIVersion2 { - t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion2, testEndpoint.Version) - } -} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go new file mode 100644 index 00000000..fd81972c --- /dev/null +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -0,0 +1,198 @@ +package registry + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// V1Endpoint stores basic information about a V1 registry endpoint. +type V1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// NewV1Endpoint parses the given address to return a registry endpoint. +func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *V1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. 
HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + endpoint := &V1Endpoint{ + IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) + return endpoint, nil +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. +func trimV1Address(address string) (string, error) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + + endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *V1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// Path returns a formatted string for the URL +// of this endpoint with the given path appended. +func (e *V1Endpoint) Path(path string) string { + return e.URL.String() + "/v1/" + path +} + +// Ping returns a PingResult which indicates whether the registry is standalone or not. +func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + // don't stop here. 
Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index e8eb4785..8fdfe3b0 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -11,57 +11,27 @@ import ( "net/http" "os" "path/filepath" - "runtime" "strings" "time" "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/tlsconfig" - "github.com/docker/docker/pkg/useragent" + "github.com/docker/go-connections/tlsconfig" ) var ( // ErrAlreadyExists is an error returned if an image being pushed // already exists on the remote side ErrAlreadyExists = errors.New("Image already exists") - errLoginRequired = errors.New("Authentication is required.") ) -// dockerUserAgent is the User-Agent the Docker client uses to identify itself. -// It is populated on init(), comprising version information of different components. -var dockerUserAgent string - -func init() { - httpVersion := make([]useragent.VersionInfo, 0, 6) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: dockerversion.Version}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: dockerversion.GitCommit}) - if kernelVersion, err := kernel.GetKernelVersion(); err == nil { - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) - } - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) - - dockerUserAgent = useragent.AppendVersions("", httpVersion...) 
- - if runtime.GOOS != "linux" { - V2Only = true - } -} - func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { // PreferredServerCipherSuites should have no effect tlsConfig := tlsconfig.ServerDefault tlsConfig.InsecureSkipVerify = !isSecure - if isSecure { + if isSecure && CertsDir != "" { hostDir := filepath.Join(CertsDir, cleanPath(hostname)) logrus.Debugf("hostDir: %s", hostDir) if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil { @@ -108,7 +78,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { keyName := certName[:len(certName)-5] + ".key" logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, keyName) { - return fmt.Errorf("Missing key %s for certificate %s", keyName, certName) + return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName) } cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) if err != nil { @@ -121,7 +91,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { certName := keyName[:len(keyName)-4] + ".cert" logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, certName) { - return fmt.Errorf("Missing certificate %s for key %s", certName, keyName) + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) } } } @@ -129,12 +99,13 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { return nil } -// DockerHeaders returns request modifiers that ensure requests have -// the User-Agent header set to dockerUserAgent and that metaHeaders -// are added. -func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{ - transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}), +// DockerHeaders returns request modifiers with a User-Agent and metaHeaders +func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) } if metaHeaders != nil { modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) @@ -187,46 +158,6 @@ func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Reque return nil } -func shouldV2Fallback(err errcode.Error) bool { - logrus.Debugf("v2 error: %T %v", err, err) - switch err.Code { - case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown: - return true - } - return false -} - -// ErrNoSupport is an error type used for errors indicating that an operation -// is not supported. It encapsulates a more specific error. -type ErrNoSupport struct{ Err error } - -func (e ErrNoSupport) Error() string { - if e.Err == nil { - return "not supported" - } - return e.Err.Error() -} - -// ContinueOnError returns true if we should fallback to the next endpoint -// as a result of this error. -func ContinueOnError(err error) bool { - switch v := err.(type) { - case errcode.Errors: - return ContinueOnError(v[0]) - case ErrNoSupport: - return ContinueOnError(v.Err) - case errcode.Error: - return shouldV2Fallback(v) - case *client.UnexpectedHTTPResponseError: - return true - } - // let's be nice and fallback if the error is a completely - // unexpected one. 
- // If new errors have to be handled in some way, please - // add them to the switch above. - return true -} - // NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the // default TLS configuration. func NewTransport(tlsConfig *tls.Config) *http.Transport { diff --git a/vendor/github.com/docker/docker/registry/registry_mock_test.go b/vendor/github.com/docker/docker/registry/registry_mock_test.go deleted file mode 100644 index fb19e577..00000000 --- a/vendor/github.com/docker/docker/registry/registry_mock_test.go +++ /dev/null @@ -1,476 +0,0 @@ -package registry - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "strconv" - "strings" - "testing" - "time" - - "github.com/docker/docker/opts" - "github.com/gorilla/mux" - - "github.com/Sirupsen/logrus" -) - -var ( - testHTTPServer *httptest.Server - testHTTPSServer *httptest.Server - testLayers = map[string]map[string]string{ - "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { - "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", - "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, - "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, - "Tty":false,"OpenStdin":false,"StdinOnce":false, - "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, - "VolumesFrom":"","Entrypoint":null},"Size":424242}`, - "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", - "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", - "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, - "layer": string([]byte{ - 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, - 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, - 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, - 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, - 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, - 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, - 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, - 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, - 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, - 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, - 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, - }), - }, - "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { - "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", - "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, - "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, - "Tty":false,"OpenStdin":false,"StdinOnce":false, - "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, - "VolumesFrom":"","Entrypoint":null},"Size":424242}`, - "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", - "checksum_tarsum": 
"tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", - "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, - "layer": string([]byte{ - 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, - 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, - 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, - 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, - 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, - 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, - 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, - 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, - 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, - 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, - 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, - }), - }, - } - testRepositories = map[string]map[string]string{ - "foo42/bar": { - "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - }, - } - mockHosts = map[string][]net.IP{ - "": {net.ParseIP("0.0.0.0")}, - "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, - "example.com": {net.ParseIP("42.42.42.42")}, - "other.com": {net.ParseIP("43.43.43.43")}, - } -) - -func init() { - r := mux.NewRouter() - - // /v1/ - r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") - r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") - r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") - r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") - r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") - r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") - r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") - r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") - r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") - r.HandleFunc("/v1/search", handlerSearch).Methods("GET") - - // /v2/ - r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") - - testHTTPServer = httptest.NewServer(handlerAccessLog(r)) - testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) - - // override net.LookupIP - lookupIP = func(host string) ([]net.IP, error) { - if host == "127.0.0.1" { - // I believe in future Go versions this will fail, so let's fix it later - return net.LookupIP(host) - } - for h, addrs := range mockHosts { - if host == h { - return addrs, nil - } - for _, addr := range addrs { - if addr.String() == host { - return []net.IP{addr}, nil - } - } - } - return nil, errors.New("lookup: no such host") - } -} - -func handlerAccessLog(handler http.Handler) http.Handler { - logHandler := func(w http.ResponseWriter, r *http.Request) { - logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) - handler.ServeHTTP(w, r) - } - return http.HandlerFunc(logHandler) -} - 
-func makeURL(req string) string { - return testHTTPServer.URL + req -} - -func makeHTTPSURL(req string) string { - return testHTTPSServer.URL + req -} - -func makeIndex(req string) *IndexInfo { - index := &IndexInfo{ - Name: makeURL(req), - } - return index -} - -func makeHTTPSIndex(req string) *IndexInfo { - index := &IndexInfo{ - Name: makeHTTPSURL(req), - } - return index -} - -func makePublicIndex() *IndexInfo { - index := &IndexInfo{ - Name: IndexServer, - Secure: true, - Official: true, - } - return index -} - -func makeServiceConfig(mirrors []string, insecureRegistries []string) *ServiceConfig { - options := &Options{ - Mirrors: opts.NewListOpts(nil), - InsecureRegistries: opts.NewListOpts(nil), - } - if mirrors != nil { - for _, mirror := range mirrors { - options.Mirrors.Set(mirror) - } - } - if insecureRegistries != nil { - for _, insecureRegistries := range insecureRegistries { - options.InsecureRegistries.Set(insecureRegistries) - } - } - - return NewServiceConfig(options) -} - -func writeHeaders(w http.ResponseWriter) { - h := w.Header() - h.Add("Server", "docker-tests/mock") - h.Add("Expires", "-1") - h.Add("Content-Type", "application/json") - h.Add("Pragma", "no-cache") - h.Add("Cache-Control", "no-cache") - h.Add("X-Docker-Registry-Version", "0.0.0") - h.Add("X-Docker-Registry-Config", "mock") -} - -func writeResponse(w http.ResponseWriter, message interface{}, code int) { - writeHeaders(w) - w.WriteHeader(code) - body, err := json.Marshal(message) - if err != nil { - io.WriteString(w, err.Error()) - return - } - w.Write(body) -} - -func readJSON(r *http.Request, dest interface{}) error { - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return err - } - return json.Unmarshal(body, dest) -} - -func apiError(w http.ResponseWriter, message string, code int) { - body := map[string]string{ - "error": message, - } - writeResponse(w, body, code) -} - -func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { - if a == b { - return - } - if len(message) == 0 { - message = fmt.Sprintf("%v != %v", a, b) - } - t.Fatal(message) -} - -func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { - if a != b { - return - } - if len(message) == 0 { - message = fmt.Sprintf("%v == %v", a, b) - } - t.Fatal(message) -} - -// Similar to assertEqual, but does not stop test -func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { - if a == b { - return - } - message := fmt.Sprintf("%v != %v", a, b) - if len(messagePrefix) != 0 { - message = messagePrefix + ": " + message - } - t.Error(message) -} - -// Similar to assertNotEqual, but does not stop test -func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { - if a != b { - return - } - message := fmt.Sprintf("%v == %v", a, b) - if len(messagePrefix) != 0 { - message = messagePrefix + ": " + message - } - t.Error(message) -} - -func requiresAuth(w http.ResponseWriter, r *http.Request) bool { - writeCookie := func() { - value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) - cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} - http.SetCookie(w, cookie) - //FIXME(sam): this should be sent only on Index routes - value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) - w.Header().Add("X-Docker-Token", value) - } - if len(r.Cookies()) > 0 { - writeCookie() - return true - } - if len(r.Header.Get("Authorization")) > 0 { - writeCookie() - return true - } - w.Header().Add("WWW-Authenticate", "token") - apiError(w, 
"Wrong auth", 401) - return false -} - -func handlerGetPing(w http.ResponseWriter, r *http.Request) { - writeResponse(w, true, 200) -} - -func handlerGetImage(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - layer, exists := testLayers[vars["image_id"]] - if !exists { - http.NotFound(w, r) - return - } - writeHeaders(w) - layerSize := len(layer["layer"]) - w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) - io.WriteString(w, layer[vars["action"]]) -} - -func handlerPutImage(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - imageID := vars["image_id"] - action := vars["action"] - layer, exists := testLayers[imageID] - if !exists { - if action != "json" { - http.NotFound(w, r) - return - } - layer = make(map[string]string) - testLayers[imageID] = layer - } - if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { - if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { - apiError(w, "Wrong checksum", 400) - return - } - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - apiError(w, fmt.Sprintf("Error: %s", err), 500) - return - } - layer[action] = string(body) - writeResponse(w, true, 200) -} - -func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - repositoryName := mux.Vars(r)["repository"] - repositoryName = NormalizeLocalName(repositoryName) - tags, exists := testRepositories[repositoryName] - if !exists { - apiError(w, "Repository not found", 404) - return - } - if r.Method == "DELETE" { - delete(testRepositories, repositoryName) - writeResponse(w, true, 200) - return - } - writeResponse(w, tags, 200) -} - -func handlerGetTag(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - repositoryName := vars["repository"] - repositoryName = NormalizeLocalName(repositoryName) - tagName := vars["tag"] - tags, exists := testRepositories[repositoryName] - if !exists { - apiError(w, "Repository not found", 404) - return - } - tag, exists := tags[tagName] - if !exists { - apiError(w, "Tag not found", 404) - return - } - writeResponse(w, tag, 200) -} - -func handlerPutTag(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - repositoryName := vars["repository"] - repositoryName = NormalizeLocalName(repositoryName) - tagName := vars["tag"] - tags, exists := testRepositories[repositoryName] - if !exists { - tags := make(map[string]string) - testRepositories[repositoryName] = tags - } - tagValue := "" - readJSON(r, tagValue) - tags[tagName] = tagValue - writeResponse(w, true, 200) -} - -func handlerUsers(w http.ResponseWriter, r *http.Request) { - code := 200 - if r.Method == "POST" { - code = 201 - } else if r.Method == "PUT" { - code = 204 - } - writeResponse(w, "", code) -} - -func handlerImages(w http.ResponseWriter, r *http.Request) { - u, _ := url.Parse(testHTTPServer.URL) - w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) - w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) - if r.Method == "PUT" { - if strings.HasSuffix(r.URL.Path, "images") { - writeResponse(w, "", 204) - return - } - writeResponse(w, "", 200) - return - } - if r.Method == "DELETE" { - writeResponse(w, "", 204) - return - } - images := []map[string]string{} - for imageID, layer := range testLayers { - image := 
make(map[string]string) - image["id"] = imageID - image["checksum"] = layer["checksum_tarsum"] - image["Tag"] = "latest" - images = append(images, image) - } - writeResponse(w, images, 200) -} - -func handlerAuth(w http.ResponseWriter, r *http.Request) { - writeResponse(w, "OK", 200) -} - -func handlerSearch(w http.ResponseWriter, r *http.Request) { - result := &SearchResults{ - Query: "fakequery", - NumResults: 1, - Results: []SearchResult{{Name: "fakeimage", StarCount: 42}}, - } - writeResponse(w, result, 200) -} - -func TestPing(t *testing.T) { - res, err := http.Get(makeURL("/v1/_ping")) - if err != nil { - t.Fatal(err) - } - assertEqual(t, res.StatusCode, 200, "") - assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", - "This is not a Mocked Registry") -} - -/* Uncomment this to test Mocked Registry locally with curl - * WARNING: Don't push on the repos uncommented, it'll block the tests - * -func TestWait(t *testing.T) { - logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) - c := make(chan int) - <-c -} - -//*/ diff --git a/vendor/github.com/docker/docker/registry/registry_test.go b/vendor/github.com/docker/docker/registry/registry_test.go deleted file mode 100644 index 7714310d..00000000 --- a/vendor/github.com/docker/docker/registry/registry_test.go +++ /dev/null @@ -1,955 +0,0 @@ -package registry - -import ( - "fmt" - "net/http" - "net/http/httputil" - "net/url" - "strings" - "testing" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/cliconfig" -) - -var ( - token = []string{"fake-token"} -) - -const ( - imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" - REPO = "foo42/bar" -) - -func spawnTestRegistrySession(t *testing.T) *Session { - authConfig := &cliconfig.AuthConfig{} - endpoint, err := NewEndpoint(makeIndex("/v1/"), nil, APIVersionUnknown) - if err != nil { - t.Fatal(err) - } - var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} - tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(nil)...) - client := HTTPClient(tr) - r, err := NewSession(client, authConfig, endpoint) - if err != nil { - t.Fatal(err) - } - // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` - // header while authenticating, in order to retrieve a token that can be later used to - // perform authenticated actions. - // - // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, - // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. - // - // Because we know that the client's transport is an `*authTransport` we simply cast it, - // in order to set the internal cached token to the fake token, and thus send that fake token - // upon every subsequent requests. 
- r.client.Transport.(*authTransport).token = token - return r -} - -func TestPingRegistryEndpoint(t *testing.T) { - testPing := func(index *IndexInfo, expectedStandalone bool, assertMessage string) { - ep, err := NewEndpoint(index, nil, APIVersionUnknown) - if err != nil { - t.Fatal(err) - } - regInfo, err := ep.Ping() - if err != nil { - t.Fatal(err) - } - - assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) - } - - testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") - testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") - testPing(makePublicIndex(), false, "Expected standalone to be false for public index") -} - -func TestEndpoint(t *testing.T) { - // Simple wrapper to fail test if err != nil - expandEndpoint := func(index *IndexInfo) *Endpoint { - endpoint, err := NewEndpoint(index, nil, APIVersionUnknown) - if err != nil { - t.Fatal(err) - } - return endpoint - } - - assertInsecureIndex := func(index *IndexInfo) { - index.Secure = true - _, err := NewEndpoint(index, nil, APIVersionUnknown) - assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") - assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") - index.Secure = false - } - - assertSecureIndex := func(index *IndexInfo) { - index.Secure = true - _, err := NewEndpoint(index, nil, APIVersionUnknown) - assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") - assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") - index.Secure = false - } - - index := &IndexInfo{} - index.Name = makeURL("/v1/") - endpoint := expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) - if endpoint.Version != APIVersion1 { - t.Fatal("Expected endpoint to be v1") - } - assertInsecureIndex(index) - - index.Name = makeURL("") - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") - if endpoint.Version != APIVersion1 { - t.Fatal("Expected endpoint to be v1") - } - assertInsecureIndex(index) - - httpURL := makeURL("") - index.Name = strings.SplitN(httpURL, "://", 2)[1] - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") - if endpoint.Version != APIVersion1 { - t.Fatal("Expected endpoint to be v1") - } - assertInsecureIndex(index) - - index.Name = makeHTTPSURL("/v1/") - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) - if endpoint.Version != APIVersion1 { - t.Fatal("Expected endpoint to be v1") - } - assertSecureIndex(index) - - index.Name = makeHTTPSURL("") - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") - if endpoint.Version != APIVersion1 { - t.Fatal("Expected endpoint to be v1") - } - assertSecureIndex(index) - - httpsURL := makeHTTPSURL("") - index.Name = strings.SplitN(httpsURL, "://", 2)[1] - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") - if endpoint.Version != APIVersion1 { - t.Fatal("Expected endpoint to be v1") - } - assertSecureIndex(index) - - 
badEndpoints := []string{ - "http://127.0.0.1/v1/", - "https://127.0.0.1/v1/", - "http://127.0.0.1", - "https://127.0.0.1", - "127.0.0.1", - } - for _, address := range badEndpoints { - index.Name = address - _, err := NewEndpoint(index, nil, APIVersionUnknown) - checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") - } -} - -func TestGetRemoteHistory(t *testing.T) { - r := spawnTestRegistrySession(t) - hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } - assertEqual(t, len(hist), 2, "Expected 2 images in history") - assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") - assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - "Unexpected second ancestry") -} - -func TestLookupRemoteImage(t *testing.T) { - r := spawnTestRegistrySession(t) - err := r.LookupRemoteImage(imageID, makeURL("/v1/")) - assertEqual(t, err, nil, "Expected error of remote lookup to nil") - if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { - t.Fatal("Expected error of remote lookup to not nil") - } -} - -func TestGetRemoteImageJSON(t *testing.T) { - r := spawnTestRegistrySession(t) - json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } - assertEqual(t, size, int64(154), "Expected size 154") - if len(json) <= 0 { - t.Fatal("Expected non-empty json") - } - - _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) - if err == nil { - t.Fatal("Expected image not found error") - } -} - -func TestGetRemoteImageLayer(t *testing.T) { - r := spawnTestRegistrySession(t) - data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) - if err != nil { - t.Fatal(err) - } - if data == nil { - t.Fatal("Expected non-nil data result") - } - - _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) - if err == nil { - t.Fatal("Expected image not found error") - } -} - -func TestGetRemoteTag(t *testing.T) { - r := spawnTestRegistrySession(t) - tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, REPO, "test") - if err != nil { - t.Fatal(err) - } - assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) - - _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, "foo42/baz", "foo") - if err != ErrRepoNotFound { - t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") - } -} - -func TestGetRemoteTags(t *testing.T) { - r := spawnTestRegistrySession(t) - tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO) - if err != nil { - t.Fatal(err) - } - assertEqual(t, len(tags), 2, "Expected two tags") - assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) - assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) - - _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz") - if err != ErrRepoNotFound { - t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") - } -} - -func TestGetRepositoryData(t *testing.T) { - r := spawnTestRegistrySession(t) - parsedURL, err := url.Parse(makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } - host := "http://" + parsedURL.Host + "/v1/" - data, err := r.GetRepositoryData("foo42/bar") - if err != nil { - t.Fatal(err) - } - assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") - assertEqual(t, len(data.Endpoints), 2, - fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) - assertEqual(t, data.Endpoints[0], host, - fmt.Sprintf("Expected first 
endpoint to be %s but found %s instead", host, data.Endpoints[0])) - assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", - fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) - -} - -func TestPushImageJSONRegistry(t *testing.T) { - r := spawnTestRegistrySession(t) - imgData := &ImgData{ - ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", - } - - err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } -} - -func TestPushImageLayerRegistry(t *testing.T) { - r := spawnTestRegistrySession(t) - layer := strings.NewReader("") - _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) - if err != nil { - t.Fatal(err) - } -} - -func TestValidateRepositoryName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - err := ValidateRepositoryName(name) - assertNotEqual(t, err, nil, "Expected invalid repo name: "+name) - } - - for _, name := range validRepoNames { - err := ValidateRepositoryName(name) - assertEqual(t, err, nil, "Expected valid repo name: "+name) - } - - err := ValidateRepositoryName(invalidRepoNames[0]) - assertEqual(t, err, ErrInvalidRepositoryName, "Expected ErrInvalidRepositoryName: "+invalidRepoNames[0]) -} - -func TestParseRepositoryInfo(t *testing.T) { - expectedRepoInfos := map[string]RepositoryInfo{ - "fooo/bar": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "fooo/bar", - LocalName: "fooo/bar", - CanonicalName: "docker.io/fooo/bar", - Official: false, - }, - "library/ubuntu": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu", - LocalName: "ubuntu", - CanonicalName: "docker.io/library/ubuntu", - Official: true, - }, - "nonlibrary/ubuntu": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "nonlibrary/ubuntu", - LocalName: "nonlibrary/ubuntu", - CanonicalName: "docker.io/nonlibrary/ubuntu", - Official: false, - }, - "ubuntu": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu", - LocalName: "ubuntu", - CanonicalName: "docker.io/library/ubuntu", - Official: true, - }, - "other/library": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "other/library", - LocalName: "other/library", - CanonicalName: "docker.io/other/library", - Official: false, - }, - "127.0.0.1:8000/private/moonbase": { - Index: &IndexInfo{ - Name: "127.0.0.1:8000", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: 
"127.0.0.1:8000/private/moonbase", - CanonicalName: "127.0.0.1:8000/private/moonbase", - Official: false, - }, - "127.0.0.1:8000/privatebase": { - Index: &IndexInfo{ - Name: "127.0.0.1:8000", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "127.0.0.1:8000/privatebase", - CanonicalName: "127.0.0.1:8000/privatebase", - Official: false, - }, - "localhost:8000/private/moonbase": { - Index: &IndexInfo{ - Name: "localhost:8000", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "localhost:8000/private/moonbase", - CanonicalName: "localhost:8000/private/moonbase", - Official: false, - }, - "localhost:8000/privatebase": { - Index: &IndexInfo{ - Name: "localhost:8000", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "localhost:8000/privatebase", - CanonicalName: "localhost:8000/privatebase", - Official: false, - }, - "example.com/private/moonbase": { - Index: &IndexInfo{ - Name: "example.com", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "example.com/private/moonbase", - CanonicalName: "example.com/private/moonbase", - Official: false, - }, - "example.com/privatebase": { - Index: &IndexInfo{ - Name: "example.com", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "example.com/privatebase", - CanonicalName: "example.com/privatebase", - Official: false, - }, - "example.com:8000/private/moonbase": { - Index: &IndexInfo{ - Name: "example.com:8000", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "example.com:8000/private/moonbase", - CanonicalName: "example.com:8000/private/moonbase", - Official: false, - }, - "example.com:8000/privatebase": { - Index: &IndexInfo{ - Name: "example.com:8000", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "example.com:8000/privatebase", - CanonicalName: "example.com:8000/privatebase", - Official: false, - }, - "localhost/private/moonbase": { - Index: &IndexInfo{ - Name: "localhost", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "localhost/private/moonbase", - CanonicalName: "localhost/private/moonbase", - Official: false, - }, - "localhost/privatebase": { - Index: &IndexInfo{ - Name: "localhost", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "localhost/privatebase", - CanonicalName: "localhost/privatebase", - Official: false, - }, - IndexName + "/public/moonbase": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "public/moonbase", - LocalName: "public/moonbase", - CanonicalName: "docker.io/public/moonbase", - Official: false, - }, - "index." + IndexName + "/public/moonbase": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "public/moonbase", - LocalName: "public/moonbase", - CanonicalName: "docker.io/public/moonbase", - Official: false, - }, - "ubuntu-12.04-base": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", - Official: true, - }, - IndexName + "/ubuntu-12.04-base": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", - Official: true, - }, - "index." 
+ IndexName + "/ubuntu-12.04-base": { - Index: &IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", - Official: true, - }, - } - - for reposName, expectedRepoInfo := range expectedRepoInfos { - repoInfo, err := ParseRepositoryInfo(reposName) - if err != nil { - t.Error(err) - } else { - checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) - checkEqual(t, repoInfo.RemoteName, expectedRepoInfo.RemoteName, reposName) - checkEqual(t, repoInfo.LocalName, expectedRepoInfo.LocalName, reposName) - checkEqual(t, repoInfo.CanonicalName, expectedRepoInfo.CanonicalName, reposName) - checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) - checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) - } - } -} - -func TestNewIndexInfo(t *testing.T) { - testIndexInfo := func(config *ServiceConfig, expectedIndexInfos map[string]*IndexInfo) { - for indexName, expectedIndexInfo := range expectedIndexInfos { - index, err := config.NewIndexInfo(indexName) - if err != nil { - t.Fatal(err) - } else { - checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") - checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") - checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") - checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") - } - } - } - - config := NewServiceConfig(nil) - noMirrors := []string{} - expectedIndexInfos := map[string]*IndexInfo{ - IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: noMirrors, - }, - "index." + IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: noMirrors, - }, - "example.com": { - Name: "example.com", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - "127.0.0.1:5000": { - Name: "127.0.0.1:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - } - testIndexInfo(config, expectedIndexInfos) - - publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} - config = makeServiceConfig(publicMirrors, []string{"example.com"}) - - expectedIndexInfos = map[string]*IndexInfo{ - IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: publicMirrors, - }, - "index." 
+ IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: publicMirrors, - }, - "example.com": { - Name: "example.com", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "example.com:5000": { - Name: "example.com:5000", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - "127.0.0.1": { - Name: "127.0.0.1", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "127.0.0.1:5000": { - Name: "127.0.0.1:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "other.com": { - Name: "other.com", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - } - testIndexInfo(config, expectedIndexInfos) - - config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) - expectedIndexInfos = map[string]*IndexInfo{ - "example.com": { - Name: "example.com", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "example.com:5000": { - Name: "example.com:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "127.0.0.1": { - Name: "127.0.0.1", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "127.0.0.1:5000": { - Name: "127.0.0.1:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "other.com": { - Name: "other.com", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - } - testIndexInfo(config, expectedIndexInfos) -} - -func TestMirrorEndpointLookup(t *testing.T) { - containsMirror := func(endpoints []APIEndpoint) bool { - for _, pe := range endpoints { - if pe.URL == "my.mirror" { - return true - } - } - return false - } - s := Service{Config: makeServiceConfig([]string{"my.mirror"}, nil)} - imageName := IndexName + "/test/image" - - pushAPIEndpoints, err := s.LookupPushEndpoints(imageName) - if err != nil { - t.Fatal(err) - } - if containsMirror(pushAPIEndpoints) { - t.Fatal("Push endpoint should not contain mirror") - } - - pullAPIEndpoints, err := s.LookupPullEndpoints(imageName) - if err != nil { - t.Fatal(err) - } - if !containsMirror(pullAPIEndpoints) { - t.Fatal("Pull endpoint should contain mirror") - } -} - -func TestPushRegistryTag(t *testing.T) { - r := spawnTestRegistrySession(t) - err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } -} - -func TestPushImageJSONIndex(t *testing.T) { - r := spawnTestRegistrySession(t) - imgData := []*ImgData{ - { - ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", - }, - { - ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", - }, - } - repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil) - if err != nil { - t.Fatal(err) - } - if repoData == nil { - t.Fatal("Expected RepositoryData object") - } - repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()}) - if err != nil { - t.Fatal(err) - } - if repoData == nil { - t.Fatal("Expected RepositoryData object") - } -} - -func TestSearchRepositories(t *testing.T) { - r := spawnTestRegistrySession(t) - results, err := r.SearchRepositories("fakequery") - if err != nil { - t.Fatal(err) - } - if results == nil { - t.Fatal("Expected non-nil SearchResults object") - } - assertEqual(t, results.NumResults, 1, "Expected 1 search results") - assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") - assertEqual(t, 
results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") -} - -func TestValidRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - //Username doc and image name docker being tested. - "doc/docker", - - // single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - if err := validateRemoteName(repositoryName); err != nil { - t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens). - "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. - "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if err := validateRemoteName(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestTrustedLocation(t *testing.T) { - for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { - req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == true { - t.Fatalf("'%s' shouldn't be detected as a trusted location", url) - } - } - - for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { - req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == false { - t.Fatalf("'%s' should be detected as a trusted location", url) - } - } -} - -func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { - for _, urls := range [][]string{ - {"http://docker.io", "https://docker.com"}, - {"https://foo.docker.io:7777", "http://bar.docker.com"}, - {"https://foo.docker.io", "https://example.com"}, - } { - reqFrom, _ := http.NewRequest("GET", urls[0], nil) - reqFrom.Header.Add("Content-Type", "application/json") - reqFrom.Header.Add("Authorization", "super_secret") - reqTo, _ := http.NewRequest("GET", urls[1], nil) - - addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) - - if len(reqTo.Header) != 1 { - t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) - } - - if reqTo.Header.Get("Content-Type") != "application/json" { - t.Fatal("'Content-Type' should be 'application/json'") - } - - if reqTo.Header.Get("Authorization") != "" { - t.Fatal("'Authorization' should be empty") - } - } - - for _, urls := range [][]string{ - {"https://docker.io", 
"https://docker.com"}, - {"https://foo.docker.io:7777", "https://bar.docker.com"}, - } { - reqFrom, _ := http.NewRequest("GET", urls[0], nil) - reqFrom.Header.Add("Content-Type", "application/json") - reqFrom.Header.Add("Authorization", "super_secret") - reqTo, _ := http.NewRequest("GET", urls[1], nil) - - addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) - - if len(reqTo.Header) != 2 { - t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) - } - - if reqTo.Header.Get("Content-Type") != "application/json" { - t.Fatal("'Content-Type' should be 'application/json'") - } - - if reqTo.Header.Get("Authorization") != "super_secret" { - t.Fatal("'Authorization' should be 'super_secret'") - } - } -} - -func TestIsSecureIndex(t *testing.T) { - tests := []struct { - addr string - insecureRegistries []string - expected bool - }{ - {IndexName, nil, true}, - {"example.com", []string{}, true}, - {"example.com", []string{"example.com"}, false}, - {"localhost", []string{"localhost:5000"}, false}, - {"localhost:5000", []string{"localhost:5000"}, false}, - {"localhost", []string{"example.com"}, false}, - {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, - {"localhost", nil, false}, - {"localhost:5000", nil, false}, - {"127.0.0.1", nil, false}, - {"localhost", []string{"example.com"}, false}, - {"127.0.0.1", []string{"example.com"}, false}, - {"example.com", nil, true}, - {"example.com", []string{"example.com"}, false}, - {"127.0.0.1", []string{"example.com"}, false}, - {"127.0.0.1:5000", []string{"example.com"}, false}, - {"example.com:5000", []string{"42.42.0.0/16"}, false}, - {"example.com", []string{"42.42.0.0/16"}, false}, - {"example.com:5000", []string{"42.42.42.42/8"}, false}, - {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, - {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, - {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, - {"invalid.domain.com", []string{"invalid.domain.com"}, false}, - {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, - {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, - } - for _, tt := range tests { - config := makeServiceConfig(nil, tt.insecureRegistries) - if sec := config.isSecureIndex(tt.addr); sec != tt.expected { - t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) - } - } -} - -type debugTransport struct { - http.RoundTripper - log func(...interface{}) -} - -func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { - dump, err := httputil.DumpRequestOut(req, false) - if err != nil { - tr.log("could not dump request") - } - tr.log(string(dump)) - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - return nil, err - } - dump, err = httputil.DumpResponse(resp, false) - if err != nil { - tr.log("could not dump response") - } - tr.log(string(dump)) - return resp, err -} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go index 6ac930d6..cdeca585 100644 --- a/vendor/github.com/docker/docker/registry/service.go +++ b/vendor/github.com/docker/docker/registry/service.go @@ -2,66 +2,111 @@ package registry import ( "crypto/tls" + "fmt" "net/http" "net/url" + "strings" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/cliconfig" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" ) // 
Service is a registry service. It tracks configuration data such as a list // of mirrors. type Service struct { - Config *ServiceConfig + config *serviceConfig } // NewService returns a new instance of Service ready to be // installed into an engine. -func NewService(options *Options) *Service { +func NewService(options ServiceOptions) *Service { return &Service{ - Config: NewServiceConfig(options), + config: newServiceConfig(options), } } +// ServiceConfig returns the public registry service configuration. +func (s *Service) ServiceConfig() *registrytypes.ServiceConfig { + return &s.config.ServiceConfig +} + // Auth contacts the public registry with the provided credentials, // and returns OK if authentication was successful. // It can be used to verify the validity of a client's credentials. -func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { - addr := authConfig.ServerAddress - if addr == "" { - // Use the official registry address if not specified. - addr = IndexServer +func (s *Service) Auth(authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { + serverAddress := authConfig.ServerAddress + if serverAddress == "" { + serverAddress = IndexServer } - index, err := s.ResolveIndex(addr) + if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { + serverAddress = "https://" + serverAddress + } + u, err := url.Parse(serverAddress) if err != nil { - return "", err + return "", "", fmt.Errorf("unable to parse server address: %v", err) } - endpointVersion := APIVersion(APIVersionUnknown) - if V2Only { - // Override the endpoint to only attempt a v2 ping - endpointVersion = APIVersion2 + endpoints, err := s.LookupPushEndpoints(u.Host) + if err != nil { + return "", "", err } - endpoint, err := NewEndpoint(index, nil, endpointVersion) - if err != nil { - return "", err + for _, endpoint := range endpoints { + login := loginV2 + if endpoint.Version == APIVersion1 { + login = loginV1 + } + + status, token, err = login(authConfig, endpoint, userAgent) + if err == nil { + return + } + if fErr, ok := err.(fallbackError); ok { + err = fErr.err + logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) + continue + } + return "", "", err } - authConfig.ServerAddress = endpoint.String() - return Login(authConfig, endpoint) + + return "", "", err +} + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName } // Search queries the public registry for images matching the specified // search terms, and returns the results. 
-func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers map[string][]string) (*SearchResults, error) { +func (s *Service) Search(term string, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + if err := validateNoSchema(term); err != nil { + return nil, err + } - repoInfo, err := s.ResolveRepositoryBySearch(term) + indexName, remoteName := splitReposSearchTerm(term) + + index, err := newIndexInfo(s.config, indexName) if err != nil { return nil, err } // *TODO: Search multiple indexes. - endpoint, err := NewEndpoint(repoInfo.Index, http.Header(headers), APIVersionUnknown) + endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) if err != nil { return nil, err } @@ -70,68 +115,66 @@ func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers if err != nil { return nil, err } - return r.SearchRepositories(repoInfo.GetSearchTerm()) + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pulling "library/foo", it's stored locally under "foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName) + } + return r.SearchRepositories(remoteName) } // ResolveRepository splits a repository name into its components // and configuration of the associated registry. -func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) { - return s.Config.NewRepositoryInfo(name, false) -} - -// ResolveRepositoryBySearch splits a repository name into its components -// and configuration of the associated registry. -func (s *Service) ResolveRepositoryBySearch(name string) (*RepositoryInfo, error) { - return s.Config.NewRepositoryInfo(name, true) +func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(s.config, name) } // ResolveIndex takes indexName and returns index info -func (s *Service) ResolveIndex(name string) (*IndexInfo, error) { - return s.Config.NewIndexInfo(name) +func (s *Service) ResolveIndex(name string) (*registrytypes.IndexInfo, error) { + return newIndexInfo(s.config, name) } // APIEndpoint represents a remote API endpoint type APIEndpoint struct { - Mirror bool - URL string - Version APIVersion - Official bool - TrimHostname bool - TLSConfig *tls.Config - VersionHeader string - Versions []auth.APIVersion + Mirror bool + URL *url.URL + Version APIVersion + Official bool + TrimHostname bool + TLSConfig *tls.Config } // ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) { - return newEndpoint(e.URL, e.TLSConfig, metaHeaders) +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) } // TLSConfig constructs a client TLS configuration based on server defaults func (s *Service) TLSConfig(hostname string) (*tls.Config, error) { - return newTLSConfig(hostname, s.Config.isSecureIndex(hostname)) + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) } -func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) { - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, err - } +func (s *Service) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { return s.TLSConfig(mirrorURL.Host) } -// LookupPullEndpoints creates an list of endpoints to try to pull from, in order of
preference. +// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. // It gives preference to v2 endpoints over v1, mirrors over the actual // registry, and HTTPS over plain HTTP. -func (s *Service) LookupPullEndpoints(repoName string) (endpoints []APIEndpoint, err error) { - return s.lookupEndpoints(repoName) +func (s *Service) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + return s.lookupEndpoints(hostname) } -// LookupPushEndpoints creates an list of endpoints to try to push to, in order of preference. +// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. // It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. // Mirrors are not included. -func (s *Service) LookupPushEndpoints(repoName string) (endpoints []APIEndpoint, err error) { - allEndpoints, err := s.lookupEndpoints(repoName) +func (s *Service) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + allEndpoints, err := s.lookupEndpoints(hostname) if err == nil { for _, endpoint := range allEndpoints { if !endpoint.Mirror { @@ -142,17 +185,17 @@ func (s *Service) LookupPushEndpoints(repoName string) (endpoints []APIEndpoint, return endpoints, err } -func (s *Service) lookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) { - endpoints, err = s.lookupV2Endpoints(repoName) +func (s *Service) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(hostname) if err != nil { return nil, err } - if V2Only { + if s.config.V2Only { return endpoints, nil } - legacyEndpoints, err := s.lookupV1Endpoints(repoName) + legacyEndpoints, err := s.lookupV1Endpoints(hostname) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go index ddb78ee6..56121eea 100644 --- a/vendor/github.com/docker/docker/registry/service_v1.go +++ b/vendor/github.com/docker/docker/registry/service_v1.go @@ -1,16 +1,15 @@ package registry import ( - "fmt" - "strings" + "net/url" - "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/tlsconfig" ) -func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, err error) { +func (s *Service) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { var cfg = tlsconfig.ServerDefault tlsConfig := &cfg - if strings.HasPrefix(repoName, DefaultNamespace+"/") { + if hostname == DefaultNamespace { endpoints = append(endpoints, APIEndpoint{ URL: DefaultV1Registry, Version: APIVersion1, @@ -21,12 +20,6 @@ func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, e return endpoints, nil } - slashIndex := strings.IndexRune(repoName, '/') - if slashIndex <= 0 { - return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) - } - hostname := repoName[:slashIndex] - tlsConfig, err = s.TLSConfig(hostname) if err != nil { return nil, err @@ -34,7 +27,10 @@ func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, e endpoints = []APIEndpoint{ { - URL: "https://" + hostname, + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, Version: APIVersion1, TrimHostname: true, TLSConfig: tlsConfig, @@ -43,7 +39,10 @@ func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, e if tlsConfig.InsecureSkipVerify { endpoints = append(endpoints, APIEndpoint{ // or this - URL: "http://" + hostname, + URL: 
&url.URL{ + Scheme: "http", + Host: hostname, + }, Version: APIVersion1, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go index 70d5fd71..4113d57d 100644 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -1,25 +1,31 @@ package registry import ( - "fmt" + "net/url" "strings" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/tlsconfig" ) -func (s *Service) lookupV2Endpoints(repoName string) (endpoints []APIEndpoint, err error) { +func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { var cfg = tlsconfig.ServerDefault tlsConfig := &cfg - if strings.HasPrefix(repoName, DefaultNamespace+"/") { + if hostname == DefaultNamespace || hostname == DefaultV1Registry.Host { // v2 mirrors - for _, mirror := range s.Config.Mirrors { - mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) if err != nil { return nil, err } endpoints = append(endpoints, APIEndpoint{ - URL: mirror, + URL: mirrorURL, // guess mirrors are v2 Version: APIVersion2, Mirror: true, @@ -39,43 +45,33 @@ func (s *Service) lookupV2Endpoints(repoName string) (endpoints []APIEndpoint, e return endpoints, nil } - slashIndex := strings.IndexRune(repoName, '/') - if slashIndex <= 0 { - return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) - } - hostname := repoName[:slashIndex] - tlsConfig, err = s.TLSConfig(hostname) if err != nil { return nil, err } - v2Versions := []auth.APIVersion{ - { - Type: "registry", - Version: "2.0", - }, - } endpoints = []APIEndpoint{ { - URL: "https://" + hostname, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, - VersionHeader: DefaultRegistryVersionHeader, - Versions: v2Versions, + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + TrimHostname: true, + TLSConfig: tlsConfig, }, } if tlsConfig.InsecureSkipVerify { endpoints = append(endpoints, APIEndpoint{ - URL: "http://" + hostname, + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, Version: APIVersion2, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - VersionHeader: DefaultRegistryVersionHeader, - Versions: v2Versions, + TLSConfig: tlsConfig, }) } diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go index 2a20d321..5647ad28 100644 --- a/vendor/github.com/docker/docker/registry/session.go +++ b/vendor/github.com/docker/docker/registry/session.go @@ -17,15 +17,16 @@ import ( "net/url" "strconv" "strings" - "time" "github.com/Sirupsen/logrus" - "github.com/docker/docker/cliconfig" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/utils" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + 
registrytypes "github.com/docker/engine-api/types/registry" ) var ( @@ -36,16 +37,16 @@ var ( // A Session is used to communicate with a V1 registry type Session struct { - indexEndpoint *Endpoint + indexEndpoint *V1Endpoint client *http.Client // TODO(tiborvass): remove authConfig - authConfig *cliconfig.AuthConfig + authConfig *types.AuthConfig id string } type authTransport struct { http.RoundTripper - *cliconfig.AuthConfig + *types.AuthConfig alwaysSetBasicAuth bool token []string @@ -67,7 +68,7 @@ type authTransport struct { // If the server sends a token without the client having requested it, it is ignored. // // This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *cliconfig.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { +func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { if base == nil { base = http.DefaultTransport } @@ -100,8 +101,8 @@ func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { // Authorization should not be set on 302 redirect for untrusted locations. // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referer header as go http package adds said header. - // This is safe as Docker doesn't set Referer in other scenarios. + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { return tr.RoundTripper.RoundTrip(orig) } @@ -162,7 +163,7 @@ func (tr *authTransport) CancelRequest(req *http.Request) { // NewSession creates a new session // TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint *Endpoint) (r *Session, err error) { +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (r *Session, err error) { r = &Session{ authConfig: authConfig, client: client, @@ -174,7 +175,7 @@ func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint // If we're working with a standalone private registry over HTTPS, send Basic Auth headers // alongside all our requests. 
- if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" { + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { info, err := endpoint.Ping() if err != nil { return nil, err @@ -213,7 +214,7 @@ func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } @@ -269,7 +270,6 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, err // GetRemoteImageLayer retrieves an image layer from the registry func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { var ( - retries = 5 statusCode = 0 res *http.Response err error @@ -280,26 +280,19 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io if err != nil { return nil, fmt.Errorf("Error while getting from the server: %v", err) } - // TODO(tiborvass): why are we doing retries at this level? - // These retries should be generic to both v1 and v2 - for i := 1; i <= retries; i++ { - statusCode = 0 - res, err = r.client.Do(req) - if err == nil { - break - } + statusCode = 0 + res, err = r.client.Do(req) + if err != nil { logrus.Debugf("Error contacting registry %s: %v", registry, err) + // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 if res != nil { if res.Body != nil { res.Body.Close() } statusCode = res.StatusCode } - if i == retries { - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - statusCode, imgID) - } - time.Sleep(time.Duration(i) * 5 * time.Second) + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) } if res.StatusCode != 200 { @@ -320,7 +313,9 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io // repository. It queries each of the registries supplied in the registries // argument, and returns data from the first one that answers the query // successfully. -func (r *Session) GetRemoteTag(registries []string, repository string, askedTag string) (string, error) { +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := repositoryRef.RemoteName() + if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace @@ -356,7 +351,9 @@ func (r *Session) GetRemoteTag(registries []string, repository string, askedTag // of the registries supplied in the registries argument, and returns data from // the first one that answers the query successfully. It returns a map with // tag names as the keys and image IDs as the values. 
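
GetRemoteTag and GetRemoteTags now accept a reference.Named and derive the remote name internally via RemoteName(). A hedged sketch of the new call shape; reference.ParseNamed is assumed to be the vendored reference package's constructor, and the repository name is illustrative:

```go
import (
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
)

// resolveLatest shows the post-change signature: callers pass a parsed
// reference rather than a raw "user/repo" string.
func resolveLatest(s *registry.Session, registries []string) (string, error) {
	named, err := reference.ParseNamed("library/busybox") // assumed constructor
	if err != nil {
		return "", err
	}
	return s.GetRemoteTag(registries, named, "latest")
}
```
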
-func (r *Session) GetRemoteTags(registries []string, repository string) (map[string]string, error) { +func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := repositoryRef.RemoteName() + if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace @@ -408,8 +405,8 @@ func buildEndpointsList(headers []string, indexEp string) ([]string, error) { } // GetRepositoryData returns lists of images and endpoints for the repository -func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote) +func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName()) logrus.Debugf("[registry] Calling GET %s", repositoryTarget) @@ -425,14 +422,14 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { // and return a non-obtuse error message for users // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" // was a top search on the docker user forum - if utils.IsTimeout(err) { + if isTimeout(err) { return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) } return nil, fmt.Errorf("Error while pulling image: %v", err) } defer res.Body.Close() if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. @@ -443,12 +440,12 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) } var endpoints []string if res.Header.Get("X-Docker-Endpoints") != "" { - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) if err != nil { return nil, err } @@ -595,10 +592,10 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry // PushRegistryTag pushes a tag on the registry. 
// Remote has the format '<user>/<repo>' -func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error { +func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { // "jsonify" the string revision = "\"" + revision + "\"" - path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag) + path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) if err != nil { @@ -612,13 +609,13 @@ func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { - return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) } return nil } // PushImageJSONIndex uploads an image list to the repository -func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { +func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} if validate { for _, elem := range imgList { @@ -638,7 +635,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if validate { suffix = "images" } - u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix) + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix) logrus.Debugf("[registry] PUT %s", u) logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) headers := map[string][]string{ @@ -666,7 +663,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate defer res.Body.Close() if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } var tokens, endpoints []string @@ -676,7 +673,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) } tokens = res.Header["X-Docker-Token"] logrus.Debugf("Auth token: %v", tokens) @@ -684,7 +681,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if res.Header.Get("X-Docker-Endpoints") == "" { return nil, fmt.Errorf("Index response didn't contain any endpoints") } - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) if err != nil { return nil, err } @@ -694,7 +691,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push
checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) } } @@ -724,9 +721,9 @@ func shouldRedirect(response *http.Response) bool { } // SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string) (*SearchResults, error) { +func (r *Session) SearchRepositories(term string) (*registrytypes.SearchResults, error) { logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) req, err := http.NewRequest("GET", u, nil) if err != nil { @@ -742,20 +739,32 @@ func (r *Session) SearchRepositories(term string) (*SearchResults, error) { if res.StatusCode != 200 { return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) } - result := new(SearchResults) + result := new(registrytypes.SearchResults) return result, json.NewDecoder(res.Body).Decode(result) } // GetAuthConfig returns the authentication settings for a session // TODO(tiborvass): remove this once registry client v2 is vendored -func (r *Session) GetAuthConfig(withPasswd bool) *cliconfig.AuthConfig { +func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { password := "" if withPasswd { password = r.authConfig.Password } - return &cliconfig.AuthConfig{ + return &types.AuthConfig{ Username: r.authConfig.Username, Password: password, - Email: r.authConfig.Email, } } + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} diff --git a/vendor/github.com/docker/docker/registry/token.go b/vendor/github.com/docker/docker/registry/token.go deleted file mode 100644 index d91bd455..00000000 --- a/vendor/github.com/docker/docker/registry/token.go +++ /dev/null @@ -1,81 +0,0 @@ -package registry - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -type tokenResponse struct { - Token string `json:"token"` -} - -func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (string, error) { - realm, ok := params["realm"] - if !ok { - return "", errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return "", fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - if realmURL.Scheme == "" { - if registryEndpoint.IsSecure { - realmURL.Scheme = "https" - } else { - realmURL.Scheme = "http" - } - } - - req, err := http.NewRequest("GET", realmURL.String(), nil) - if err != nil { - return "", err - } - - reqParams := req.URL.Query() - service := params["service"] - scope := params["scope"] - - if service != "" { - reqParams.Add("service", service) - } - - for _, scopeField := range strings.Fields(scope) { - reqParams.Add("scope", scopeField) - } - - if username != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := registryEndpoint.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("token auth attempt for registry %s: %s request failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - decoder := json.NewDecoder(resp.Body) - - tr := new(tokenResponse) 
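
Back in session.go, the new unexported isTimeout helper replaces the old utils.IsTimeout call. A small illustration of what it classifies, written as if in-package; the fake error type is invented for the example:

```go
import (
	"errors"
	"fmt"
	"net/url"
)

// fakeTimeout satisfies the anonymous timeout interface that isTimeout
// checks for after unwrapping *url.Error.
type fakeTimeout struct{}

func (fakeTimeout) Error() string { return "i/o timeout" }
func (fakeTimeout) Timeout() bool { return true }

func exampleIsTimeout() {
	wrapped := &url.Error{Op: "Get", URL: "https://index.docker.io/v1/", Err: fakeTimeout{}}
	fmt.Println(isTimeout(wrapped))         // true: url.Error is unwrapped first
	fmt.Println(isTimeout(errors.New("x"))) // false: no Timeout() method
}
```
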
- if err = decoder.Decode(tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.Token == "" { - return "", errors.New("authorization server did not include a token in the response") - } - - return tr.Token, nil -} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go index 09b9d571..4247fed6 100644 --- a/vendor/github.com/docker/docker/registry/types.go +++ b/vendor/github.com/docker/docker/registry/types.go @@ -1,30 +1,9 @@ package registry -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial indicates whether the result is an official repository or not - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsOfficial indicates whether the result is trusted - IsTrusted bool `json:"is_trusted"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the acutal results for the search - Results []SearchResult `json:"results"` -} +import ( + "github.com/docker/docker/reference" + registrytypes "github.com/docker/engine-api/types/registry" +) // RepositoryData tracks the image list, list of endpoints, and list of tokens // for a repository @@ -67,72 +46,23 @@ func (av APIVersion) String() string { return apiVersions[av] } -var apiVersions = map[APIVersion]string{ - 1: "v1", - 2: "v2", -} - // API Version identifiers. const ( - APIVersionUnknown = iota - APIVersion1 + _ = iota + APIVersion1 APIVersion = iota APIVersion2 ) -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. 
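
With SearchResult and SearchResults deleted here, consumers switch to the matching structs in engine-api's registry types. A sketch of reading the new return value of SearchRepositories; the field names mirror the definitions removed above:

```go
import (
	"fmt"

	"github.com/docker/docker/registry"
)

func printSearch(s *registry.Session, term string) error {
	results, err := s.SearchRepositories(term)
	if err != nil {
		return err
	}
	for _, r := range results.Results {
		// Name/StarCount/IsOfficial carry over 1:1 from the deleted structs.
		fmt.Printf("%s stars=%d official=%v\n", r.Name, r.StarCount, r.IsOfficial)
	}
	return nil
}
```
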
- Secure bool - // Official indicates whether this is an official registry - Official bool +var apiVersions = map[APIVersion]string{ + APIVersion1: "v1", + APIVersion2: "v2", } // RepositoryInfo describes a repository type RepositoryInfo struct { + reference.Named // Index points to registry information - Index *IndexInfo - // RemoteName is the remote name of the repository, such as - // "library/ubuntu-12.04-base" - RemoteName string - // LocalName is the local name of the repository, such as - // "ubuntu-12.04-base" - LocalName string - // CanonicalName is the canonical name of the repository, such as - // "docker.io/library/ubuntu-12.04-base" - CanonicalName string + Index *registrytypes.IndexInfo // Official indicates whether the repository is considered official. // If the registry is official, and the normalized name does not // contain a '/' (e.g. "foo"), then it is considered an official repo. diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go new file mode 100644 index 00000000..0e844de2 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -0,0 +1,131 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/engine-api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + failureCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartmanager based on a policy. +func New(policy container.RestartPolicy) RestartManager { + return &restartManager{policy: policy, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on active restartmanager") + } + + if exitCode != 0 { + rm.failureCount++ + } else { + rm.failureCount = 0 + } + + // if the container ran for more than 10s, reguardless of status and policy reset the + // the timeout back to the default. 
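
ShouldRestart's backoff and cancellation logic continues in the remainder of the new restartmanager file below. A usage sketch against the interface declared above; the policy literal comes from engine-api:

```go
import (
	"time"

	"github.com/docker/docker/restartmanager"
	"github.com/docker/engine-api/types/container"
)

func superviseOnce() error {
	rm := restartmanager.New(container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3})
	// exit code 1, not manually stopped, ran for 30 seconds
	restart, wait, err := rm.ShouldRestart(1, false, 30*time.Second)
	if err != nil || !restart {
		return err
	}
	// wait yields nil once the backoff elapses, or ErrRestartCanceled
	// if Cancel is called first.
	return <-wait
}
```
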
+ if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + if rm.timeout == 0 { + rm.timeout = defaultTimeout + } else { + rm.timeout *= backoffMultiplier + } + + var restart bool + switch { + case rm.policy.IsAlways(), rm.policy.IsUnlessStopped(): + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.failureCount <= max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/compare.go b/vendor/github.com/docker/docker/runconfig/compare.go index ebb8ead6..61346aab 100644 --- a/vendor/github.com/docker/docker/runconfig/compare.go +++ b/vendor/github.com/docker/docker/runconfig/compare.go @@ -1,8 +1,10 @@ package runconfig +import "github.com/docker/engine-api/types/container" + // Compare two Config struct. Do not compare the "Image" nor "Hostname" fields // If OpenStdin is set, then it differs -func Compare(a, b *Config) bool { +func Compare(a, b *container.Config) bool { if a == nil || b == nil || a.OpenStdin || b.OpenStdin { return false @@ -15,19 +17,17 @@ func Compare(a, b *Config) bool { return false } - if a.Cmd.Len() != b.Cmd.Len() || + if len(a.Cmd) != len(b.Cmd) || len(a.Env) != len(b.Env) || len(a.Labels) != len(b.Labels) || len(a.ExposedPorts) != len(b.ExposedPorts) || - a.Entrypoint.Len() != b.Entrypoint.Len() || + len(a.Entrypoint) != len(b.Entrypoint) || len(a.Volumes) != len(b.Volumes) { return false } - aCmd := a.Cmd.Slice() - bCmd := b.Cmd.Slice() - for i := 0; i < len(aCmd); i++ { - if aCmd[i] != bCmd[i] { + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { return false } } @@ -47,10 +47,8 @@ func Compare(a, b *Config) bool { } } - aEntrypoint := a.Entrypoint.Slice() - bEntrypoint := b.Entrypoint.Slice() - for i := 0; i < len(aEntrypoint); i++ { - if aEntrypoint[i] != bEntrypoint[i] { + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { return false } } diff --git a/vendor/github.com/docker/docker/runconfig/compare_test.go b/vendor/github.com/docker/docker/runconfig/compare_test.go deleted file mode 100644 index 1a5c6c6c..00000000 --- a/vendor/github.com/docker/docker/runconfig/compare_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package runconfig - -import ( - "testing" - - "github.com/docker/docker/pkg/nat" - "github.com/docker/docker/pkg/stringutils" -) - -// Just to make life easier -func newPortNoError(proto, port string) nat.Port { - p, _ := nat.NewPort(proto, port) - return p -} - -func TestCompare(t *testing.T) { - ports1 := make(nat.PortSet) - ports1[newPortNoError("tcp", "1111")] = struct{}{} - ports1[newPortNoError("tcp", "2222")] = struct{}{} - ports2 := make(nat.PortSet) - ports2[newPortNoError("tcp", "3333")] = struct{}{} - ports2[newPortNoError("tcp", "4444")] = struct{}{} - ports3 := make(nat.PortSet) - ports3[newPortNoError("tcp", "1111")] = struct{}{} - ports3[newPortNoError("tcp", "2222")] = struct{}{} - 
ports3[newPortNoError("tcp", "5555")] = struct{}{} - volumes1 := make(map[string]struct{}) - volumes1["/test1"] = struct{}{} - volumes2 := make(map[string]struct{}) - volumes2["/test2"] = struct{}{} - volumes3 := make(map[string]struct{}) - volumes3["/test1"] = struct{}{} - volumes3["/test3"] = struct{}{} - envs1 := []string{"ENV1=value1", "ENV2=value2"} - envs2 := []string{"ENV1=value1", "ENV3=value3"} - entrypoint1 := stringutils.NewStrSlice("/bin/sh", "-c") - entrypoint2 := stringutils.NewStrSlice("/bin/sh", "-d") - entrypoint3 := stringutils.NewStrSlice("/bin/sh", "-c", "echo") - cmd1 := stringutils.NewStrSlice("/bin/sh", "-c") - cmd2 := stringutils.NewStrSlice("/bin/sh", "-d") - cmd3 := stringutils.NewStrSlice("/bin/sh", "-c", "echo") - labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} - labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} - labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} - - sameConfigs := map[*Config]*Config{ - // Empty config - &Config{}: {}, - // Does not compare hostname, domainname & image - &Config{ - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user", - }: { - Hostname: "host2", - Domainname: "domain2", - Image: "image2", - User: "user", - }, - // only OpenStdin - &Config{OpenStdin: false}: {OpenStdin: false}, - // only env - &Config{Env: envs1}: {Env: envs1}, - // only cmd - &Config{Cmd: cmd1}: {Cmd: cmd1}, - // only labels - &Config{Labels: labels1}: {Labels: labels1}, - // only exposedPorts - &Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, - // only entrypoints - &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, - // only volumes - &Config{Volumes: volumes1}: {Volumes: volumes1}, - } - differentConfigs := map[*Config]*Config{ - nil: nil, - &Config{ - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user1", - }: { - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user2", - }, - // only OpenStdin - &Config{OpenStdin: false}: {OpenStdin: true}, - &Config{OpenStdin: true}: {OpenStdin: false}, - // only env - &Config{Env: envs1}: {Env: envs2}, - // only cmd - &Config{Cmd: cmd1}: {Cmd: cmd2}, - // not the same number of parts - &Config{Cmd: cmd1}: {Cmd: cmd3}, - // only labels - &Config{Labels: labels1}: {Labels: labels2}, - // not the same number of labels - &Config{Labels: labels1}: {Labels: labels3}, - // only exposedPorts - &Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, - // not the same number of ports - &Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, - // only entrypoints - &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, - // not the same number of parts - &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, - // only volumes - &Config{Volumes: volumes1}: {Volumes: volumes2}, - // not the same number of labels - &Config{Volumes: volumes1}: {Volumes: volumes3}, - } - for config1, config2 := range sameConfigs { - if !Compare(config1, config2) { - t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) - } - } - for config1, config2 := range differentConfigs { - if Compare(config1, config2) { - t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) - } - } -} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go index 93ddb46c..9f3f9c5e 100644 --- a/vendor/github.com/docker/docker/runconfig/config.go +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ 
-5,56 +5,21 @@ import ( "fmt" "io" - "github.com/docker/docker/pkg/nat" - "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/volume" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" ) -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - - // Applicable to all platforms - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - Cmd *stringutils.StrSlice // Command to run when starting the container - Entrypoint *stringutils.StrSlice // Entrypoint to run when starting the container - Env []string // List of environment variable to set in the container - ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports - Hostname string // Hostname - Image string // Name of the image as it was passed by the operator (eg. could be symbolic) - Labels map[string]string // List of labels set to this container - MacAddress string `json:",omitempty"` // Mac Address of the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled (--net=none) - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - - // Applicable to UNIX platforms - Domainname string // Domainname - PublishService string `json:",omitempty"` // Name of the network service exposed by the container - StopSignal string `json:",omitempty"` // Signal to stop a container - User string // User that will run the command(s) inside the container -} - // DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper // struct and returns both a Config and an HostConfig struct // Be aware this function is not checking whether the resulted structs are nil, // it's your business to do so -func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) { +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { var w ContainerConfigWrapper decoder := json.NewDecoder(src) if err := decoder.Decode(&w); err != nil { - return nil, nil, err + return nil, nil, nil, err } hc := w.getHostConfig() @@ -62,33 +27,33 @@ func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) { // Perform platform-specific processing of Volumes and Binds. 
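
DecodeContainerConfig now returns engine-api's NetworkingConfig as a fourth value, as the signature above shows. A minimal sketch of the updated call shape; the surrounding function is illustrative:

```go
import (
	"bytes"

	"github.com/docker/docker/runconfig"
)

func decode(body []byte) error {
	config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(bytes.NewReader(body))
	if err != nil {
		return err
	}
	// *container.Config, *container.HostConfig, *networktypes.NetworkingConfig
	_, _, _ = config, hostConfig, networkingConfig
	return nil
}
```
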
if w.Config != nil && hc != nil { - // Initialise the volumes map if currently nil + // Initialize the volumes map if currently nil if w.Config.Volumes == nil { w.Config.Volumes = make(map[string]struct{}) } // Now validate all the volumes and binds if err := validateVolumesAndBindSettings(w.Config, hc); err != nil { - return nil, nil, err + return nil, nil, nil, err } } // Certain parameters need daemon-side validation that cannot be done // on the client, as only the daemon knows what is valid for the platform. if err := ValidateNetMode(w.Config, hc); err != nil { - return nil, nil, err + return nil, nil, nil, err } - // Validate the isolation level - if err := ValidateIsolationLevel(hc); err != nil { - return nil, nil, err + // Validate isolation + if err := ValidateIsolation(hc); err != nil { + return nil, nil, nil, err } - return w.Config, hc, nil + return w.Config, hc, w.NetworkingConfig, nil } // validateVolumesAndBindSettings validates each of the volumes and bind settings // passed by the caller to ensure they are valid. -func validateVolumesAndBindSettings(c *Config, hc *HostConfig) error { +func validateVolumesAndBindSettings(c *container.Config, hc *container.HostConfig) error { // Ensure all volumes and binds are valid. for spec := range c.Volumes { diff --git a/vendor/github.com/docker/docker/runconfig/config_test.go b/vendor/github.com/docker/docker/runconfig/config_test.go deleted file mode 100644 index b721cf10..00000000 --- a/vendor/github.com/docker/docker/runconfig/config_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package runconfig - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "runtime" - "strings" - "testing" - - "github.com/docker/docker/pkg/stringutils" -) - -type f struct { - file string - entrypoint *stringutils.StrSlice -} - -func TestDecodeContainerConfig(t *testing.T) { - - var ( - fixtures []f - image string - ) - - if runtime.GOOS != "windows" { - image = "ubuntu" - fixtures = []f{ - {"fixtures/unix/container_config_1_14.json", stringutils.NewStrSlice()}, - {"fixtures/unix/container_config_1_17.json", stringutils.NewStrSlice("bash")}, - {"fixtures/unix/container_config_1_19.json", stringutils.NewStrSlice("bash")}, - } - } else { - image = "windows" - fixtures = []f{ - {"fixtures/windows/container_config_1_19.json", stringutils.NewStrSlice("cmd")}, - } - } - - for _, f := range fixtures { - b, err := ioutil.ReadFile(f.file) - if err != nil { - t.Fatal(err) - } - - c, h, err := DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) - } - - if c.Image != image { - t.Fatalf("Expected %s image, found %s\n", image, c.Image) - } - - if c.Entrypoint.Len() != f.entrypoint.Len() { - t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) - } - - if h != nil && h.Memory != 1000 { - t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) - } - } -} - -// TestDecodeContainerConfigIsolation validates the isolation level passed -// to the daemon in the hostConfig structure. Note this is platform specific -// as to what level of container isolation is supported. 
-func TestDecodeContainerConfigIsolation(t *testing.T) { - - // An invalid isolation level - if _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { - t.Fatal(err) - } - } - - // Blank isolation level (== default) - if _, _, err := callDecodeContainerConfigIsolation(""); err != nil { - t.Fatal("Blank isolation should have succeeded") - } - - // Default isolation level - if _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { - t.Fatal("default isolation should have succeeded") - } - - // Hyper-V Containers isolation level (Valid on Windows only) - if runtime.GOOS == "windows" { - if _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - t.Fatal("hyperv isolation should have succeeded") - } - } else { - if _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { - t.Fatal(err) - } - } - } -} - -// callDecodeContainerConfigIsolation is a utility function to call -// DecodeContainerConfig for validating isolation levels -func callDecodeContainerConfigIsolation(isolation string) (*Config, *HostConfig, error) { - var ( - b []byte - err error - ) - w := ContainerConfigWrapper{ - Config: &Config{}, - HostConfig: &HostConfig{ - NetworkMode: "none", - Isolation: IsolationLevel(isolation)}, - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) - } - return DecodeContainerConfig(bytes.NewReader(b)) -} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go index 63bd0a2f..e5902fb0 100644 --- a/vendor/github.com/docker/docker/runconfig/config_unix.go +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -2,18 +2,24 @@ package runconfig -// ContainerConfigWrapper is a Config wrapper that hold the container Config (portable) +import ( + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) // and the corresponding HostConfig (non-portable). type ContainerConfigWrapper struct { - *Config - InnerHostConfig *HostConfig `json:"HostConfig,omitempty"` - Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. - *HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. } // getHostConfig gets the HostConfig of the Config. // It's mostly there to handle Deprecated fields of the ContainerConfigWrapper -func (w *ContainerConfigWrapper) getHostConfig() *HostConfig { +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { hc := w.HostConfig if hc == nil && w.InnerHostConfig != nil { @@ -46,7 +52,7 @@ func (w *ContainerConfigWrapper) getHostConfig() *HostConfig { } // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards compatible API behaviour. + // backwards compatible API behavior. 
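
getHostConfig keeps honoring both the deprecated top-level fields and the nested "HostConfig" object. A hedged sketch using DecodeHostConfig (updated later in this patch) to show both JSON shapes landing in the same struct; the JSON literals and the assumption that both decode paths succeed are illustrative:

```go
import (
	"strings"

	"github.com/docker/docker/runconfig"
)

func decodeBothShapes() error {
	// Nested, current shape: populates InnerHostConfig.
	if _, err := runconfig.DecodeHostConfig(strings.NewReader(`{"HostConfig":{"NetworkMode":"bridge"}}`)); err != nil {
		return err
	}
	// Deprecated top-level shape: still honored via the embedded HostConfig.
	_, err := runconfig.DecodeHostConfig(strings.NewReader(`{"NetworkMode":"bridge"}`))
	return err
}
```
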
hc = SetDefaultNetModeIfBlank(hc) return hc diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go deleted file mode 100644 index 2ab8e19a..00000000 --- a/vendor/github.com/docker/docker/runconfig/config_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package runconfig - -// ContainerConfigWrapper is a Config wrapper that hold the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *Config - HostConfig *HostConfig `json:"HostConfig,omitempty"` -} - -// getHostConfig gets the HostConfig of the Config. -func (w *ContainerConfigWrapper) getHostConfig() *HostConfig { - return w.HostConfig -} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 00000000..d3608576 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,40 @@ +package runconfig + +import ( + "fmt" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links + ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. + ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. 
This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") +) diff --git a/vendor/github.com/docker/docker/runconfig/exec.go b/vendor/github.com/docker/docker/runconfig/exec.go deleted file mode 100644 index 6fe28ea3..00000000 --- a/vendor/github.com/docker/docker/runconfig/exec.go +++ /dev/null @@ -1,62 +0,0 @@ -package runconfig - -import ( - flag "github.com/docker/docker/pkg/mflag" -) - -// ExecConfig is a small subset of the Config struct that hold the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - Container string // Name of the container (to execute in) - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard output - AttachStdout bool // Attach the standard error - Detach bool // Execute in detach mode - Cmd []string // Execution commands and args -} - -// ParseExec parses the specified args for the specified command and generates -// an ExecConfig from it. -// If the minimal number of specified args is not right or if specified args are -// not valid, it will return an error. 
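
The conflict values in the new errors.go above are plain fmt.Errorf sentinels, so callers distinguish them by identity. A sketch; the function wrapping the comparison is a stand-in, not from this patch:

```go
import (
	"github.com/docker/docker/runconfig"
)

// explain maps two of the sentinel errors to user-facing hints.
func explain(err error) string {
	switch err {
	case runconfig.ErrConflictHostNetworkAndLinks:
		return "--net=host cannot be combined with --link"
	case runconfig.ErrConflictNetworkAndDNS:
		return "--dns cannot be combined with this network mode"
	default:
		return err.Error()
	}
}
```
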
-func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { - var ( - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])") - flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") - execCmd []string - container string - ) - cmd.Require(flag.Min, 2) - if err := cmd.ParseFlags(args, true); err != nil { - return nil, err - } - container = cmd.Arg(0) - parsedArgs := cmd.Args() - execCmd = parsedArgs[1:] - - execConfig := &ExecConfig{ - User: *flUser, - Privileged: *flPrivileged, - Tty: *flTty, - Cmd: execCmd, - Container: container, - Detach: *flDetach, - } - - // If -d is not set, attach to everything by default - if !*flDetach { - execConfig.AttachStdout = true - execConfig.AttachStderr = true - if *flStdin { - execConfig.AttachStdin = true - } - } - - return execConfig, nil -} diff --git a/vendor/github.com/docker/docker/runconfig/exec_test.go b/vendor/github.com/docker/docker/runconfig/exec_test.go deleted file mode 100644 index a4b7ea9b..00000000 --- a/vendor/github.com/docker/docker/runconfig/exec_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package runconfig - -import ( - "fmt" - "io/ioutil" - "testing" - - flag "github.com/docker/docker/pkg/mflag" -) - -type arguments struct { - args []string -} - -func TestParseExec(t *testing.T) { - invalids := map[*arguments]error{ - &arguments{[]string{"-unknown"}}: fmt.Errorf("flag provided but not defined: -unknown"), - &arguments{[]string{"-u"}}: fmt.Errorf("flag needs an argument: -u"), - &arguments{[]string{"--user"}}: fmt.Errorf("flag needs an argument: --user"), - } - valids := map[*arguments]*ExecConfig{ - &arguments{ - []string{"container", "command"}, - }: { - Container: "container", - Cmd: []string{"command"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - []string{"container", "command1", "command2"}, - }: { - Container: "container", - Cmd: []string{"command1", "command2"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - []string{"-i", "-t", "-u", "uid", "container", "command"}, - }: { - User: "uid", - AttachStdin: true, - AttachStdout: true, - AttachStderr: true, - Tty: true, - Container: "container", - Cmd: []string{"command"}, - }, - &arguments{ - []string{"-d", "container", "command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Container: "container", - Cmd: []string{"command"}, - }, - &arguments{ - []string{"-t", "-i", "-d", "container", "command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Tty: true, - Container: "container", - Cmd: []string{"command"}, - }, - } - for invalid, expectedError := range invalids { - cmd := flag.NewFlagSet("exec", flag.ContinueOnError) - cmd.ShortUsage = func() {} - cmd.SetOutput(ioutil.Discard) - _, err := ParseExec(cmd, invalid.args) - if err == nil || err.Error() != expectedError.Error() { - t.Fatalf("Expected an error [%v] for %v, got %v", expectedError, invalid, err) - } - - } - for valid, expectedExecConfig := range valids { - cmd := flag.NewFlagSet("exec", flag.ContinueOnError) - cmd.ShortUsage = func() {} - cmd.SetOutput(ioutil.Discard) - execConfig, err := ParseExec(cmd, valid.args) - if err != nil { - 
t.Fatal(err) - } - if !compareExecConfig(expectedExecConfig, execConfig) { - t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) - } - } -} - -func compareExecConfig(config1 *ExecConfig, config2 *ExecConfig) bool { - if config1.AttachStderr != config2.AttachStderr { - return false - } - if config1.AttachStdin != config2.AttachStdin { - return false - } - if config1.AttachStdout != config2.AttachStdout { - return false - } - if config1.Container != config2.Container { - return false - } - if config1.Detach != config2.Detach { - return false - } - if config1.Privileged != config2.Privileged { - return false - } - if config1.Tty != config2.Tty { - return false - } - if config1.User != config2.User { - return false - } - if len(config1.Cmd) != len(config2.Cmd) { - return false - } - for index, value := range config1.Cmd { - if value != config2.Cmd[index] { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go index dae899c4..769cc9f5 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -3,221 +3,13 @@ package runconfig import ( "encoding/json" "io" - "strings" - "github.com/docker/docker/pkg/nat" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/pkg/ulimit" + "github.com/docker/engine-api/types/container" ) -// KeyValuePair is a structure that hold a value for a key. -type KeyValuePair struct { - Key string - Value string -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsolationLevel represents the isolation level of a container. The supported -// values are platform specific -type IsolationLevel string - -// IsDefault indicates the default isolation level of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i IsolationLevel) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses it's private ipc stack. -func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's ipc stack. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's ipc stack. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the ipc stack is valid. -func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses it's private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. 
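
The mode types being deleted from hostconfig.go here were relocated rather than dropped: engine-api's container package defines the same helpers. A sketch of the moved IpcMode methods, assuming engine-api preserves the semantics shown in the deleted code:

```go
import (
	"fmt"

	"github.com/docker/engine-api/types/container"
)

func ipcModeDemo() {
	m := container.IpcMode("container:db")
	// Same behavior as the deleted helpers above.
	fmt.Println(m.IsContainer(), m.Container(), m.Valid()) // true db true
}
```
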
-func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid stack of the container. -type PidMode string - -// IsPrivate indicates whether the container uses it's private pid stack. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's pid stack. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the pid stack is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the contain will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. -type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - - // Applicable to UNIX platforms - BlkioWeight uint16 // Block IO weight (relative weight vs. 
other containers) - CapAdd *stringutils.StrSlice // List of kernel capabilities to add to the container - CapDrop *stringutils.StrSlice // List of kernel capabilities to remove from the container - CgroupParent string // Parent cgroup. - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - KernelMemory int64 // Kernel memory limit (in bytes) - Links []string // List of links (in the name:alias form) - Memory int64 // Memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable bool // Whether to disable OOM Killer or not - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - Ulimits []*ulimit.Ulimit // List of ulimits to be set in the container - UTSMode UTSMode // UTS namespace to use for the container - - // Applicable to Windows - ConsoleSize [2]int // Initial console size - Isolation IsolationLevel // Isolation level of the container (eg default, hyperv) -} - // DecodeHostConfig creates a HostConfig based on the specified Reader. // It assumes the content of the reader will be JSON, and decodes it. -func DecodeHostConfig(src io.Reader) (*HostConfig, error) { +func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { decoder := json.NewDecoder(src) var w ContainerConfigWrapper @@ -233,10 +25,10 @@ func DecodeHostConfig(src io.Reader) (*HostConfig, error) { // to default if it is not populated. This ensures backwards compatibility after // the validation of the network mode was moved from the docker CLI to the // docker daemon. -func SetDefaultNetModeIfBlank(hc *HostConfig) *HostConfig { +func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { if hc != nil { - if hc.NetworkMode == NetworkMode("") { - hc.NetworkMode = NetworkMode("default") + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") } } return hc diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go deleted file mode 100644 index 9fe3fa78..00000000 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// +build !windows - -package runconfig - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" -) - -// TODO Windows: This will need addressing for a Windows daemon. 
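The reworked DecodeHostConfig above now yields the engine-api container.HostConfig instead of the local type. A minimal usage sketch, assuming the vendored import path and the flat pre-1.19 JSON layout that the deleted fixtures below exercise:

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/docker/runconfig"
    )

    func main() {
        // Flat, 1.14-style host-config JSON as used by the deleted fixtures.
        hc, err := runconfig.DecodeHostConfig(strings.NewReader(`{"Binds":["/tmp:/tmp"]}`))
        if err != nil {
            panic(err)
        }
        // An unset network mode is normalized to "default" for backwards compatibility.
        hc = runconfig.SetDefaultNetModeIfBlank(hc)
        fmt.Println(hc.Binds, hc.NetworkMode) // [/tmp:/tmp] default
    }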
-func TestNetworkModeTest(t *testing.T) { - networkModes := map[NetworkMode][]bool{ - // private, bridge, host, container, none, default - "": {true, false, false, false, false, false}, - "something:weird": {true, false, false, false, false, false}, - "bridge": {true, true, false, false, false, false}, - DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, - "host": {false, false, true, false, false, false}, - "container:name": {false, false, false, true, false, false}, - "none": {true, false, false, false, true, false}, - "default": {true, false, false, false, false, true}, - } - networkModeNames := map[NetworkMode]string{ - "": "", - "something:weird": "something:weird", - "bridge": "bridge", - DefaultDaemonNetworkMode(): "bridge", - "host": "host", - "container:name": "container", - "none": "none", - "default": "default", - } - for networkMode, state := range networkModes { - if networkMode.IsPrivate() != state[0] { - t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) - } - if networkMode.IsBridge() != state[1] { - t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) - } - if networkMode.IsHost() != state[2] { - t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) - } - if networkMode.IsContainer() != state[3] { - t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) - } - if networkMode.IsNone() != state[4] { - t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) - } - if networkMode.IsDefault() != state[5] { - t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) - } - if networkMode.NetworkName() != networkModeNames[networkMode] { - t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) - } - } -} - -func TestIpcModeTest(t *testing.T) { - ipcModes := map[IpcMode][]bool{ - // private, host, container, valid - "": {true, false, false, true}, - "something:weird": {true, false, false, false}, - ":weird": {true, false, false, true}, - "host": {false, true, false, true}, - "container:name": {false, false, true, true}, - "container:name:something": {false, false, true, false}, - "container:": {false, false, true, false}, - } - for ipcMode, state := range ipcModes { - if ipcMode.IsPrivate() != state[0] { - t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) - } - if ipcMode.IsHost() != state[1] { - t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) - } - if ipcMode.IsContainer() != state[2] { - t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) - } - if ipcMode.Valid() != state[3] { - t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) - } - } - containerIpcModes := map[IpcMode]string{ - "": "", - "something": "", - "something:weird": "weird", - "container": "", - "container:": "", - "container:name": "name", - "container:name1:name2": "name1:name2", - } - for ipcMode, container := range containerIpcModes { - if ipcMode.Container() != container { - t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) - } - } 
-} - -func TestUTSModeTest(t *testing.T) { - utsModes := map[UTSMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for utsMode, state := range utsModes { - if utsMode.IsPrivate() != state[0] { - t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) - } - if utsMode.IsHost() != state[1] { - t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) - } - if utsMode.Valid() != state[2] { - t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) - } - } -} - -func TestPidModeTest(t *testing.T) { - pidModes := map[PidMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for pidMode, state := range pidModes { - if pidMode.IsPrivate() != state[0] { - t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) - } - if pidMode.IsHost() != state[1] { - t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) - } - if pidMode.Valid() != state[2] { - t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) - } - } -} - -func TestRestartPolicy(t *testing.T) { - restartPolicies := map[RestartPolicy][]bool{ - // none, always, failure - RestartPolicy{}: {false, false, false}, - RestartPolicy{"something", 0}: {false, false, false}, - RestartPolicy{"no", 0}: {true, false, false}, - RestartPolicy{"always", 0}: {false, true, false}, - RestartPolicy{"on-failure", 0}: {false, false, true}, - } - for restartPolicy, state := range restartPolicies { - if restartPolicy.IsNone() != state[0] { - t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) - } - if restartPolicy.IsAlways() != state[1] { - t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) - } - if restartPolicy.IsOnFailure() != state[2] { - t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) - } - } -} - -func TestMergeConfigs(t *testing.T) { - expectedHostname := "hostname" - expectedContainerIDFile := "containerIdFile" - config := &Config{ - Hostname: expectedHostname, - } - hostConfig := &HostConfig{ - ContainerIDFile: expectedContainerIDFile, - } - containerConfigWrapper := MergeConfigs(config, hostConfig) - if containerConfigWrapper.Config.Hostname != expectedHostname { - t.Fatalf("containerConfigWrapper config hostname expected %v got %v", expectedHostname, containerConfigWrapper.Config.Hostname) - } - if containerConfigWrapper.InnerHostConfig.ContainerIDFile != expectedContainerIDFile { - t.Fatalf("containerConfigWrapper hostconfig containerIdfile expected %v got %v", expectedContainerIDFile, containerConfigWrapper.InnerHostConfig.ContainerIDFile) - } - if containerConfigWrapper.Cpuset != "" { - t.Fatalf("Expected empty Cpuset, got %v", containerConfigWrapper.Cpuset) - } -} - -func TestDecodeHostConfig(t *testing.T) { - fixtures := []struct { - file string - }{ - {"fixtures/unix/container_hostconfig_1_14.json"}, - {"fixtures/unix/container_hostconfig_1_19.json"}, - } - - for _, f := range fixtures { - b, err := 
ioutil.ReadFile(f.file) - if err != nil { - t.Fatal(err) - } - - c, err := DecodeHostConfig(bytes.NewReader(b)) - if err != nil { - t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) - } - - if c.Privileged != false { - t.Fatalf("Expected privileged false, found %v\n", c.Privileged) - } - - if l := len(c.Binds); l != 1 { - t.Fatalf("Expected 1 bind, found %d\n", l) - } - - if c.CapAdd.Len() != 1 && c.CapAdd.Slice()[0] != "NET_ADMIN" { - t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) - } - - if c.CapDrop.Len() != 1 && c.CapDrop.Slice()[0] != "NET_ADMIN" { - t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) - } - } -} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go index 7aad5a33..efc26112 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -6,109 +6,25 @@ import ( "fmt" "runtime" "strings" + + "github.com/docker/engine-api/types/container" ) -// IsValid indicates is an isolation level is valid -func (i IsolationLevel) IsValid() bool { - return i.IsDefault() -} - -// IsPrivate indicates whether container uses it's private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - // DefaultDaemonNetworkMode returns the default network stack the daemon should // use. -func DefaultDaemonNetworkMode() NetworkMode { - return NetworkMode("bridge") -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// ConnectedContainer is the id of the container which network this container is connected to. 
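The NetworkMode helpers deleted in this hunk move to engine-api; their combined name-resolution rule can be summarized in one standalone function (a sketch with hypothetical names, not the vendored code):

    package main

    import (
        "fmt"
        "strings"
    )

    // networkName mirrors the deleted NetworkMode.NetworkName: well-known modes
    // map to themselves, "container:x" collapses to "container", and anything
    // else is treated as a user-defined network name.
    func networkName(mode string) string {
        switch {
        case mode == "bridge", mode == "host", mode == "none", mode == "default":
            return mode
        case strings.HasPrefix(mode, "container:"):
            return "container"
        default:
            return mode // user-defined network, or "" when unset
        }
    }

    func main() {
        fmt.Println(networkName("container:db")) // "container"
        fmt.Println(networkName("my-net"))       // "my-net"
    }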
-func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") } // IsPreDefinedNetwork indicates if a network is predefined by the daemon func IsPreDefinedNetwork(network string) bool { - n := NetworkMode(network) - return n.IsBridge() || n.IsHost() || n.IsNone() -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// MergeConfigs merges the specified container Config and HostConfig. -// It creates a ContainerConfigWrapper. -func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { - return &ContainerConfigWrapper{ - config, - hostConfig, - "", nil, - } + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() } // ValidateNetMode ensures that the various combinations of requested // network settings are valid. -func ValidateNetMode(c *Config, hc *HostConfig) error { +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil @@ -120,10 +36,14 @@ func ValidateNetMode(c *Config, hc *HostConfig) error { } } - if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && c.Hostname != "" { + if hc.NetworkMode.IsContainer() && c.Hostname != "" { return ErrConflictNetworkHostname } + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { return ErrConflictHostNetworkAndLinks } @@ -132,10 +52,6 @@ func ValidateNetMode(c *Config, hc *HostConfig) error { return ErrConflictContainerNetworkAndLinks } - if hc.NetworkMode.IsUserDefined() && len(hc.Links) > 0 { - return ErrConflictUserDefinedNetworkAndLinks - } - if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && len(hc.DNS) > 0 { return ErrConflictNetworkAndDNS } @@ -158,10 +74,10 @@ func ValidateNetMode(c *Config, hc *HostConfig) error { return nil } -// ValidateIsolationLevel performs platform specific validation of the -// isolation level in the hostconfig structure. Linux only supports "default" +// ValidateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" // which is LXC container isolation -func ValidateIsolationLevel(hc *HostConfig) error { +func ValidateIsolation(hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go deleted file mode 100644 index dbdb1683..00000000 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go +++ /dev/null @@ -1,81 +0,0 @@ -package runconfig - -import ( - "fmt" - "strings" -) - -// IsDefault indicates whether container uses the default network stack. 
-func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsHyperV indicates the use of Hyper-V Containers for isolation (as opposed -// to Windows Server Containers -func (i IsolationLevel) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsValid indicates is an isolation level is valid -func (i IsolationLevel) IsValid() bool { - return i.IsDefault() || i.IsHyperV() -} - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() NetworkMode { - return NetworkMode("default") -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } - return "" -} - -// MergeConfigs merges the specified container Config and HostConfig. -// It creates a ContainerConfigWrapper. -func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { - return &ContainerConfigWrapper{ - config, - hostConfig, - } -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - return false -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *Config, hc *HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - parts := strings.Split(string(hc.NetworkMode), ":") - switch mode := parts[0]; mode { - case "default", "none": - default: - return fmt.Errorf("invalid --net: %s", hc.NetworkMode) - } - return nil -} - -// ValidateIsolationLevel performs platform specific validation of the -// isolation level in the hostconfig structure. Windows supports 'default' (or -// blank), and 'hyperv'. These refer to Windows Server Containers and -// Hyper-V Containers respectively. -func ValidateIsolationLevel(hc *HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q. Windows supports 'default' (Windows Server Container) or 'hyperv' (Hyper-V Container)", hc.Isolation) - } - return nil -} diff --git a/vendor/github.com/docker/docker/runconfig/merge.go b/vendor/github.com/docker/docker/runconfig/merge.go deleted file mode 100644 index 0106c7ea..00000000 --- a/vendor/github.com/docker/docker/runconfig/merge.go +++ /dev/null @@ -1,80 +0,0 @@ -package runconfig - -import ( - "strings" - - "github.com/docker/docker/pkg/nat" -) - -// Merge merges two Config, the image container configuration (defaults values), -// and the user container configuration, either passed by the API or generated -// by the cli. -// It will mutate the specified user configuration (userConf) with the image -// configuration where the user configuration is incomplete. 
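The env-merging rule the comment above describes keeps a user-supplied key over the image's. A standalone sketch of just the non-empty branch of the deleted Merge (hypothetical helper name):

    package main

    import (
        "fmt"
        "strings"
    )

    // mergeEnv mirrors the deleted Merge logic for environment variables:
    // image-level entries are appended only when the user has not already
    // set the same key.
    func mergeEnv(userEnv, imageEnv []string) []string {
        for _, ie := range imageEnv {
            key := strings.Split(ie, "=")[0]
            found := false
            for _, ue := range userEnv {
                if strings.Split(ue, "=")[0] == key {
                    found = true
                    break
                }
            }
            if !found {
                userEnv = append(userEnv, ie)
            }
        }
        return userEnv
    }

    func main() {
        fmt.Println(mergeEnv([]string{"VAR2=3"}, []string{"VAR1=1", "VAR2=2"}))
        // [VAR2=3 VAR1=1]
    }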
-func Merge(userConf, imageConf *Config) error { - if userConf.User == "" { - userConf.User = imageConf.User - } - if len(userConf.ExposedPorts) == 0 { - userConf.ExposedPorts = imageConf.ExposedPorts - } else if imageConf.ExposedPorts != nil { - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(nat.PortSet) - } - for port := range imageConf.ExposedPorts { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - - if len(userConf.Env) == 0 { - userConf.Env = imageConf.Env - } else { - for _, imageEnv := range imageConf.Env { - found := false - imageEnvKey := strings.Split(imageEnv, "=")[0] - for _, userEnv := range userConf.Env { - userEnvKey := strings.Split(userEnv, "=")[0] - if imageEnvKey == userEnvKey { - found = true - break - } - } - if !found { - userConf.Env = append(userConf.Env, imageEnv) - } - } - } - - if userConf.Labels == nil { - userConf.Labels = map[string]string{} - } - if imageConf.Labels != nil { - for l := range userConf.Labels { - imageConf.Labels[l] = userConf.Labels[l] - } - userConf.Labels = imageConf.Labels - } - - if userConf.Entrypoint.Len() == 0 { - if userConf.Cmd.Len() == 0 { - userConf.Cmd = imageConf.Cmd - } - - if userConf.Entrypoint == nil { - userConf.Entrypoint = imageConf.Entrypoint - } - } - if userConf.WorkingDir == "" { - userConf.WorkingDir = imageConf.WorkingDir - } - if len(userConf.Volumes) == 0 { - userConf.Volumes = imageConf.Volumes - } else { - for k, v := range imageConf.Volumes { - userConf.Volumes[k] = v - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/runconfig/merge_test.go b/vendor/github.com/docker/docker/runconfig/merge_test.go deleted file mode 100644 index 6237ee9d..00000000 --- a/vendor/github.com/docker/docker/runconfig/merge_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package runconfig - -import ( - "testing" - - "github.com/docker/docker/pkg/nat" -) - -func TestMerge(t *testing.T) { - volumesImage := make(map[string]struct{}) - volumesImage["/test1"] = struct{}{} - volumesImage["/test2"] = struct{}{} - portsImage := make(nat.PortSet) - portsImage[newPortNoError("tcp", "1111")] = struct{}{} - portsImage[newPortNoError("tcp", "2222")] = struct{}{} - configImage := &Config{ - ExposedPorts: portsImage, - Env: []string{"VAR1=1", "VAR2=2"}, - Volumes: volumesImage, - } - - portsUser := make(nat.PortSet) - portsUser[newPortNoError("tcp", "2222")] = struct{}{} - portsUser[newPortNoError("tcp", "3333")] = struct{}{} - volumesUser := make(map[string]struct{}) - volumesUser["/test3"] = struct{}{} - configUser := &Config{ - ExposedPorts: portsUser, - Env: []string{"VAR2=3", "VAR3=3"}, - Volumes: volumesUser, - } - - if err := Merge(configUser, configImage); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 3 { - t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) - } - } - if len(configUser.Env) != 3 { - t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) - } - for _, env := range configUser.Env { - if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { - t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) - } - } - - if len(configUser.Volumes) != 3 { - t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", 
len(configUser.Volumes))
-	}
-	for v := range configUser.Volumes {
-		if v != "/test1" && v != "/test2" && v != "/test3" {
-			t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
-		}
-	}
-
-	ports, _, err := nat.ParsePortSpecs([]string{"0000"})
-	if err != nil {
-		t.Error(err)
-	}
-	configImage2 := &Config{
-		ExposedPorts: ports,
-	}
-
-	if err := Merge(configUser, configImage2); err != nil {
-		t.Error(err)
-	}
-
-	if len(configUser.ExposedPorts) != 4 {
-		t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
-	}
-	for portSpecs := range configUser.ExposedPorts {
-		if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
-			t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs)
-		}
-	}
-}
diff --git a/vendor/github.com/docker/docker/opts/envfile.go b/vendor/github.com/docker/docker/runconfig/opts/envfile.go
similarity index 100%
rename from vendor/github.com/docker/docker/opts/envfile.go
rename to vendor/github.com/docker/docker/runconfig/opts/envfile.go
diff --git a/vendor/github.com/docker/docker/runconfig/opts/opts.go b/vendor/github.com/docker/docker/runconfig/opts/opts.go
new file mode 100644
index 00000000..f919218d
--- /dev/null
+++ b/vendor/github.com/docker/docker/runconfig/opts/opts.go
@@ -0,0 +1,70 @@
+package opts
+
+import (
+	"fmt"
+	fopts "github.com/docker/docker/opts"
+	"net"
+	"os"
+	"strings"
+)
+
+// ValidateAttach validates that the specified string is a valid attach option.
+func ValidateAttach(val string) (string, error) {
+	s := strings.ToLower(val)
+	for _, str := range []string{"stdin", "stdout", "stderr"} {
+		if s == str {
+			return s, nil
+		}
+	}
+	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
+}
+
+// ValidateEnv validates an environment variable and returns it.
+// If no value is specified, it returns the current value using os.Getenv.
+//
+// As on ParseEnvFile and related to #16585, environment variable names
+// are not validated whatsoever; it's up to the application inside docker
+// to validate them or not.
+func ValidateEnv(val string) (string, error) {
+	arr := strings.Split(val, "=")
+	if len(arr) > 1 {
+		return val, nil
+	}
+	if !doesEnvExist(val) {
+		return val, nil
+	}
+	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
+}
+
+func doesEnvExist(name string) bool {
+	for _, entry := range os.Environ() {
+		parts := strings.SplitN(entry, "=", 2)
+		if parts[0] == name {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
+// ExtraHosts are in the form of name:ip, where the ip has to be a valid ip (ipv4 or ipv6).
+func ValidateExtraHost(val string) (string, error) {
+	// allow for IPv6 addresses in extra hosts by only splitting on first ":"
+	arr := strings.SplitN(val, ":", 2)
+	if len(arr) != 2 || len(arr[0]) == 0 {
+		return "", fmt.Errorf("bad format for add-host: %q", val)
+	}
+	if _, err := fopts.ValidateIPAddress(arr[1]); err != nil {
+		return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+	}
+	return val, nil
+}
+
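A quick sketch of ValidateEnv's behaviour as defined above, reimplemented standalone (hypothetical name; os.LookupEnv stands in for the doesEnvExist scan over os.Environ):

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    // validateEnv: "KEY=VAL" passes through untouched, while a bare "KEY"
    // is expanded from the caller's environment when it is set there.
    func validateEnv(val string) string {
        if strings.Contains(val, "=") {
            return val
        }
        if v, ok := os.LookupEnv(val); ok {
            return fmt.Sprintf("%s=%s", val, v)
        }
        return val
    }

    func main() {
        os.Setenv("EDITOR", "vim")
        fmt.Println(validateEnv("EDITOR"))  // "EDITOR=vim"
        fmt.Println(validateEnv("FOO=bar")) // "FOO=bar"
    }

+// ValidateMACAddress validates a MAC address.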
+func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go b/vendor/github.com/docker/docker/runconfig/opts/parse.go new file mode 100644 index 00000000..6543b406 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -0,0 +1,766 @@ +package opts + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/signal" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// Parse parses the specified args for the specified command and generates a Config, +// a HostConfig and returns them with the specified command. +// If the specified args are not valid, it will return an error. +func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, *flag.FlagSet, error) { + var ( + // FIXME: use utils.ListOpts for attach and volumes? + flAttach = opts.NewListOpts(ValidateAttach) + flVolumes = opts.NewListOpts(nil) + flTmpfs = opts.NewListOpts(nil) + flBlkioWeightDevice = NewWeightdeviceOpt(ValidateWeightDevice) + flDeviceReadBps = NewThrottledeviceOpt(ValidateThrottleBpsDevice) + flDeviceWriteBps = NewThrottledeviceOpt(ValidateThrottleBpsDevice) + flLinks = opts.NewListOpts(ValidateLink) + flAliases = opts.NewListOpts(nil) + flDeviceReadIOps = NewThrottledeviceOpt(ValidateThrottleIOpsDevice) + flDeviceWriteIOps = NewThrottledeviceOpt(ValidateThrottleIOpsDevice) + flEnv = opts.NewListOpts(ValidateEnv) + flLabels = opts.NewListOpts(ValidateEnv) + flDevices = opts.NewListOpts(ValidateDevice) + + flUlimits = NewUlimitOpt(nil) + + flPublish = opts.NewListOpts(nil) + flExpose = opts.NewListOpts(nil) + flDNS = opts.NewListOpts(opts.ValidateIPAddress) + flDNSSearch = opts.NewListOpts(opts.ValidateDNSSearch) + flDNSOptions = opts.NewListOpts(nil) + flExtraHosts = opts.NewListOpts(ValidateExtraHost) + flVolumesFrom = opts.NewListOpts(nil) + flEnvFile = opts.NewListOpts(nil) + flCapAdd = opts.NewListOpts(nil) + flCapDrop = opts.NewListOpts(nil) + flGroupAdd = opts.NewListOpts(nil) + flSecurityOpt = opts.NewListOpts(nil) + flLabelsFile = opts.NewListOpts(nil) + flLoggingOpts = opts.NewListOpts(nil) + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to this container") + flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use") + flUTSMode = cmd.String([]string{"-uts"}, "", "UTS namespace to use") + flUsernsMode = cmd.String([]string{"-userns"}, "", "User namespace to use") + flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports") + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flOomKillDisable = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer") + flOomScoreAdj = cmd.Int([]string{"-oom-score-adj"}, 0, "Tune host's OOM preferences (-1000 to 1000)") + flContainerIDFile = cmd.String([]string{"-cidfile"}, "", "Write the container ID to the file") + 
flEntrypoint = cmd.String([]string{"-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") + flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") + flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemoryReservation = cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") + flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flKernelMemory = cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") + flCPUShares = cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCPUPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flCPUQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flCpusetCpus = cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flBlkioWeight = cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") + flSwappiness = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tune container memory swappiness (0 to 100)") + flNetMode = cmd.String([]string{"-net"}, "default", "Connect a container to a network") + flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") + flIPv4Address = cmd.String([]string{"-ip"}, "", "Container IPv4 address (e.g. 172.30.100.104)") + flIPv6Address = cmd.String([]string{"-ip6"}, "", "Container IPv6 address (e.g. 
2001:db8::33)") + flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") + flPidsLimit = cmd.Int64([]string{"-pids-limit"}, 0, "Tune container pids limit (set -1 for unlimited)") + flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") + flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") + flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") + flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") + flStopSignal = cmd.String([]string{"-stop-signal"}, signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) + flIsolation = cmd.String([]string{"-isolation"}, "", "Container isolation technology") + flShmSize = cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB") + ) + + cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR") + cmd.Var(&flBlkioWeightDevice, []string{"-blkio-weight-device"}, "Block IO weight (relative device weight)") + cmd.Var(&flDeviceReadBps, []string{"-device-read-bps"}, "Limit read rate (bytes per second) from a device") + cmd.Var(&flDeviceWriteBps, []string{"-device-write-bps"}, "Limit write rate (bytes per second) to a device") + cmd.Var(&flDeviceReadIOps, []string{"-device-read-iops"}, "Limit read rate (IO per second) from a device") + cmd.Var(&flDeviceWriteIOps, []string{"-device-write-iops"}, "Limit write rate (IO per second) to a device") + cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume") + cmd.Var(&flTmpfs, []string{"-tmpfs"}, "Mount a tmpfs directory") + cmd.Var(&flLinks, []string{"-link"}, "Add link to another container") + cmd.Var(&flAliases, []string{"-net-alias"}, "Add network-scoped alias for the container") + cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container") + cmd.Var(&flLabels, []string{"l", "-label"}, "Set meta data on a container") + cmd.Var(&flLabelsFile, []string{"-label-file"}, "Read in a line delimited file of labels") + cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") + cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a file of environment variables") + cmd.Var(&flPublish, []string{"p", "-publish"}, "Publish a container's port(s) to the host") + cmd.Var(&flExpose, []string{"-expose"}, "Expose a port or a range of ports") + cmd.Var(&flDNS, []string{"-dns"}, "Set custom DNS servers") + cmd.Var(&flDNSSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flDNSOptions, []string{"-dns-opt"}, "Set DNS options") + cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") + cmd.Var(&flVolumesFrom, []string{"-volumes-from"}, "Mount volumes from the specified container(s)") + cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") + cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") + cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join") + cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") + cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") + cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options") + + cmd.Require(flag.Min, 1) + + if err := cmd.ParseFlags(args, true); err != nil { + return nil, nil, nil, cmd, err + } + + var ( 
+ attachStdin = flAttach.Get("stdin") + attachStdout = flAttach.Get("stdout") + attachStderr = flAttach.Get("stderr") + ) + + // Validate the input mac address + if *flMacAddress != "" { + if _, err := ValidateMACAddress(*flMacAddress); err != nil { + return nil, nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress) + } + } + if *flStdin { + attachStdin = true + } + // If -a is not set attach to the output stdio + if flAttach.Len() == 0 { + attachStdout = true + attachStderr = true + } + + var err error + + var flMemory int64 + if *flMemoryString != "" { + flMemory, err = units.RAMInBytes(*flMemoryString) + if err != nil { + return nil, nil, nil, cmd, err + } + } + + var MemoryReservation int64 + if *flMemoryReservation != "" { + MemoryReservation, err = units.RAMInBytes(*flMemoryReservation) + if err != nil { + return nil, nil, nil, cmd, err + } + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(*flMemorySwap) + if err != nil { + return nil, nil, nil, cmd, err + } + } + } + + var KernelMemory int64 + if *flKernelMemory != "" { + KernelMemory, err = units.RAMInBytes(*flKernelMemory) + if err != nil { + return nil, nil, nil, cmd, err + } + } + + swappiness := *flSwappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, nil, nil, cmd, fmt.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) + } + + var shmSize int64 + if *flShmSize != "" { + shmSize, err = units.RAMInBytes(*flShmSize) + if err != nil { + return nil, nil, nil, cmd, err + } + } + + var binds []string + // add any bind targets to the list of container volumes + for bind := range flVolumes.GetMap() { + if arr := volumeSplitN(bind, 2); len(arr) > 1 { + // after creating the bind mount we want to delete it from the flVolumes values because + // we do not want bind mounts being committed to image configs + binds = append(binds, bind) + flVolumes.Delete(bind) + } + } + + // Can't evaluate options passed into --tmpfs until we actually mount + tmpfs := make(map[string]string) + for _, t := range flTmpfs.GetAll() { + if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { + if _, _, err := mount.ParseTmpfsOptions(arr[1]); err != nil { + return nil, nil, nil, cmd, err + } + tmpfs[arr[0]] = arr[1] + } else { + tmpfs[arr[0]] = "" + } + } + + var ( + parsedArgs = cmd.Args() + runCmd strslice.StrSlice + entrypoint strslice.StrSlice + image = cmd.Arg(0) + ) + if len(parsedArgs) > 1 { + runCmd = strslice.StrSlice(parsedArgs[1:]) + } + if *flEntrypoint != "" { + entrypoint = strslice.StrSlice{*flEntrypoint} + } + // Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant. + hostname := *flHostname + if hostname != "" { + // Linux hostname is limited to HOST_NAME_MAX=64, not not including the terminating null byte. 
+ matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", hostname) + if len(hostname) > 64 || !matched { + return nil, nil, nil, cmd, fmt.Errorf("invalid hostname format for --hostname: %s", hostname) + } + } + + ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) + if err != nil { + return nil, nil, nil, cmd, err + } + + // Merge in exposed ports to the map of published ports + for _, e := range flExpose.GetAll() { + if strings.Contains(e, ":") { + return nil, nil, nil, cmd, fmt.Errorf("invalid port format for --expose: %s", e) + } + //support two formats for expose, original format /[] or /[] + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + //if expose a port, the start and end port are the same + start, end, err := nat.ParsePortRange(port) + if err != nil { + return nil, nil, nil, cmd, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) + } + for i := start; i <= end; i++ { + p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) + if err != nil { + return nil, nil, nil, cmd, err + } + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } + + // parse device mappings + deviceMappings := []container.DeviceMapping{} + for _, device := range flDevices.GetAll() { + deviceMapping, err := ParseDevice(device) + if err != nil { + return nil, nil, nil, cmd, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables, err := readKVStrings(flEnvFile.GetAll(), flEnv.GetAll()) + if err != nil { + return nil, nil, nil, cmd, err + } + + // collect all the labels for the container + labels, err := readKVStrings(flLabelsFile.GetAll(), flLabels.GetAll()) + if err != nil { + return nil, nil, nil, cmd, err + } + + ipcMode := container.IpcMode(*flIpcMode) + if !ipcMode.Valid() { + return nil, nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode") + } + + pidMode := container.PidMode(*flPidMode) + if !pidMode.Valid() { + return nil, nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode") + } + + utsMode := container.UTSMode(*flUTSMode) + if !utsMode.Valid() { + return nil, nil, nil, cmd, fmt.Errorf("--uts: invalid UTS mode") + } + + usernsMode := container.UsernsMode(*flUsernsMode) + if !usernsMode.Valid() { + return nil, nil, nil, cmd, fmt.Errorf("--userns: invalid USER mode") + } + + restartPolicy, err := ParseRestartPolicy(*flRestartPolicy) + if err != nil { + return nil, nil, nil, cmd, err + } + + loggingOpts, err := parseLoggingOpts(*flLoggingDriver, flLoggingOpts.GetAll()) + if err != nil { + return nil, nil, nil, cmd, err + } + + securityOpts, err := parseSecurityOpts(flSecurityOpt.GetAll()) + if err != nil { + return nil, nil, nil, cmd, err + } + + resources := container.Resources{ + CgroupParent: *flCgroupParent, + Memory: flMemory, + MemoryReservation: MemoryReservation, + MemorySwap: memorySwap, + MemorySwappiness: flSwappiness, + KernelMemory: KernelMemory, + OomKillDisable: flOomKillDisable, + CPUShares: *flCPUShares, + CPUPeriod: *flCPUPeriod, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CPUQuota: *flCPUQuota, + PidsLimit: *flPidsLimit, + BlkioWeight: *flBlkioWeight, + BlkioWeightDevice: flBlkioWeightDevice.GetList(), + BlkioDeviceReadBps: flDeviceReadBps.GetList(), + BlkioDeviceWriteBps: flDeviceWriteBps.GetList(), + BlkioDeviceReadIOps: flDeviceReadIOps.GetList(), + BlkioDeviceWriteIOps: 
flDeviceWriteIOps.GetList(), + Ulimits: flUlimits.GetList(), + Devices: deviceMappings, + } + + config := &container.Config{ + Hostname: *flHostname, + ExposedPorts: ports, + User: *flUser, + Tty: *flTty, + // TODO: deprecated, it comes from -n, --networking + // it's still needed internally to set the network to disabled + // if e.g. bridge is none in daemon opts, and in inspect + NetworkDisabled: false, + OpenStdin: *flStdin, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: image, + Volumes: flVolumes.GetMap(), + MacAddress: *flMacAddress, + Entrypoint: entrypoint, + WorkingDir: *flWorkingDir, + Labels: ConvertKVStringsToMap(labels), + } + if cmd.IsSet("-stop-signal") { + config.StopSignal = *flStopSignal + } + + hostConfig := &container.HostConfig{ + Binds: binds, + ContainerIDFile: *flContainerIDFile, + OomScoreAdj: *flOomScoreAdj, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + // Make sure the dns fields are never nil. + // New containers don't ever have those fields nil, + // but pre created containers can still have those nil values. + // See https://github.com/docker/docker/pull/17779 + // for a more detailed explanation on why we don't want that. + DNS: flDNS.GetAllOrEmpty(), + DNSSearch: flDNSSearch.GetAllOrEmpty(), + DNSOptions: flDNSOptions.GetAllOrEmpty(), + ExtraHosts: flExtraHosts.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + NetworkMode: container.NetworkMode(*flNetMode), + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + UsernsMode: usernsMode, + CapAdd: strslice.StrSlice(flCapAdd.GetAll()), + CapDrop: strslice.StrSlice(flCapDrop.GetAll()), + GroupAdd: flGroupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: securityOpts, + ReadonlyRootfs: *flReadonlyRootfs, + LogConfig: container.LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, + VolumeDriver: *flVolumeDriver, + Isolation: container.Isolation(*flIsolation), + ShmSize: shmSize, + Resources: resources, + Tmpfs: tmpfs, + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + + networkingConfig := &networktypes.NetworkingConfig{ + EndpointsConfig: make(map[string]*networktypes.EndpointSettings), + } + + if *flIPv4Address != "" || *flIPv6Address != "" { + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = &networktypes.EndpointSettings{ + IPAMConfig: &networktypes.EndpointIPAMConfig{ + IPv4Address: *flIPv4Address, + IPv6Address: *flIPv6Address, + }, + } + } + + if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Links = make([]string, len(hostConfig.Links)) + copy(epConfig.Links, hostConfig.Links) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + if flAliases.Len() > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Aliases = make([]string, flAliases.Len()) + copy(epConfig.Aliases, flAliases.GetAll()) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + return config, hostConfig, networkingConfig, cmd, nil +} + +// reads a file of line terminated key=value 
pairs and override that with override parameter +func readKVStrings(files []string, override []string) ([]string, error) { + envVariables := []string{} + for _, ef := range files { + parsedVars, err := ParseEnvFile(ef) + if err != nil { + return nil, err + } + envVariables = append(envVariables, parsedVars...) + } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, override...) + + return envVariables, nil +} + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} + +func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { + loggingOptsMap := ConvertKVStringsToMap(loggingOpts) + if loggingDriver == "none" && len(loggingOpts) > 0 { + return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) + } + return loggingOptsMap, nil +} + +// takes a local seccomp daemon, reads the file contents for sending to the daemon +func parseSecurityOpts(securityOpts []string) ([]string, error) { + for key, opt := range securityOpts { + con := strings.SplitN(opt, "=", 2) + if len(con) == 1 && con[0] != "no-new-privileges" { + if strings.Index(opt, ":") != -1 { + con = strings.SplitN(opt, ":", 2) + } else { + return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) + } + } + if con[0] == "seccomp" && con[1] != "unconfined" { + f, err := ioutil.ReadFile(con[1]) + if err != nil { + return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) + } + b := bytes.NewBuffer(nil) + if err := json.Compact(b, f); err != nil { + return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) + } + securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) + } + } + + return securityOpts, nil +} + +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { + p := container.RestartPolicy{} + + if policy == "" { + return p, nil + } + + var ( + parts = strings.Split(policy, ":") + name = parts[0] + ) + + p.Name = name + switch name { + case "always", "unless-stopped": + if len(parts) > 1 { + return p, fmt.Errorf("maximum restart count not valid with restart policy of \"%s\"", name) + } + case "no": + // do nothing + case "on-failure": + if len(parts) > 2 { + return p, fmt.Errorf("restart count format is not valid, usage: 'on-failure:N' or 'on-failure'") + } + if len(parts) == 2 { + count, err := strconv.Atoi(parts[1]) + if err != nil { + return p, err + } + + p.MaximumRetryCount = count + } + default: + return p, fmt.Errorf("invalid restart policy %s", name) + } + + return p, nil +} + +// ParseDevice parses a device mapping string to a container.DeviceMapping struct +func ParseDevice(device string) (container.DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + if ValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + dst = arr[1] + } + fallthrough + case 1: + src = arr[0] + default: + return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src 
+ } + + deviceMapping := container.DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get an HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// ValidateLink validates that the specified string has a valid link format (containerName:alias). +func ValidateLink(val string) (string, error) { + if _, _, err := ParseLink(val); err != nil { + return val, err + } + return val, nil +} + +// ValidDeviceMode checks if the mode for device is valid or not. +// Valid mode is a composition of r (read), w (write), and m (mknod). +func ValidDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// ValidateDevice validates a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +// It also validates the device mode. +func ValidateDevice(val string) (string, error) { + return validatePath(val, ValidDeviceMode) +} + +func validatePath(val string, validator func(string) bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for path: %s", val) + } + + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, fmt.Errorf("bad format for path: %s", val) + } + switch len(split) { + case 1: + containerPath = split[0] + val = path.Clean(containerPath) + case 2: + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) + } + case 3: + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, fmt.Errorf("bad mode specified: %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. +// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). +// In Windows driver letter appears in two situations: +// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) +// b. A string in the format like `\\?\C:\Windows\...` (UNC). +// Therefore, a driver letter can only follow either a `:` or `\\` +// This allows to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`. 
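ParseLink, defined above, is exported, so a short usage sketch is possible (assuming the vendored import path):

    package main

    import (
        "fmt"

        runopts "github.com/docker/docker/runconfig/opts"
    )

    func main() {
        // "name:alias" splits into its two parts.
        name, alias, _ := runopts.ParseLink("db:database")
        fmt.Println(name, alias) // db database

        // A bare name aliases to itself.
        name, alias, _ = runopts.ParseLink("db")
        fmt.Println(name, alias) // db db
    }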
+func volumeSplitN(raw string, n int) []string { + var array []string + if len(raw) == 0 || raw[0] == ':' { + // invalid + return nil + } + // numberOfParts counts the number of parts separated by a separator colon + numberOfParts := 0 + // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. + left := 0 + // right represents the right-most cursor in raw incremented with the loop. Note this + // starts at index 1 as index 0 is already handle above as a special case. + for right := 1; right < len(raw); right++ { + // stop parsing if reached maximum number of parts + if n >= 0 && numberOfParts >= n { + break + } + if raw[right] != ':' { + continue + } + potentialDriveLetter := raw[right-1] + if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { + if right > 1 { + beforePotentialDriveLetter := raw[right-2] + // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) + if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { + // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. + } + // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. + } else { + // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + } + // need to take care of the last part + if left < len(raw) { + if n >= 0 && numberOfParts >= n { + // if the maximum number of parts is reached, just append the rest to the last part + // left-1 is at the last `:` that needs to be included since not considered a separator. + array[n-1] += raw[left-1:] + } else { + array = append(array, raw[left:]) + } + } + return array +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go new file mode 100644 index 00000000..acb00ed5 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go @@ -0,0 +1,108 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/engine-api/types/blkiodev" + "github.com/docker/go-units" +) + +// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. +type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) + +// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := units.RAMInBytes(split[1]) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. 
Unit is optional and can be kb, mb, or gb", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := strconv.ParseUint(split[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ThrottledeviceOpt defines a map of ThrottleDevices +type ThrottledeviceOpt struct { + values []*blkiodev.ThrottleDevice + validator ValidatorThrottleFctType +} + +// NewThrottledeviceOpt creates a new ThrottledeviceOpt +func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { + values := []*blkiodev.ThrottleDevice{} + return ThrottledeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt +func (opt *ThrottledeviceOpt) Set(val string) error { + var value *blkiodev.ThrottleDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns ThrottledeviceOpt values as a string. +func (opt *ThrottledeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to ThrottleDevices. +func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { + var throttledevice []*blkiodev.ThrottleDevice + for _, v := range opt.values { + throttledevice = append(throttledevice, v) + } + + return throttledevice +} diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/runconfig/opts/ulimit.go similarity index 71% rename from vendor/github.com/docker/docker/opts/ulimit.go rename to vendor/github.com/docker/docker/runconfig/opts/ulimit.go index b41f475b..0aec91f1 100644 --- a/vendor/github.com/docker/docker/opts/ulimit.go +++ b/vendor/github.com/docker/docker/runconfig/opts/ulimit.go @@ -3,25 +3,25 @@ package opts import ( "fmt" - "github.com/docker/docker/pkg/ulimit" + "github.com/docker/go-units" ) // UlimitOpt defines a map of Ulimits type UlimitOpt struct { - values *map[string]*ulimit.Ulimit + values *map[string]*units.Ulimit } // NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { if ref == nil { - ref = &map[string]*ulimit.Ulimit{} + ref = &map[string]*units.Ulimit{} } return &UlimitOpt{ref} } // Set validates a Ulimit and sets its name as a key in UlimitOpt func (o *UlimitOpt) Set(val string) error { - l, err := ulimit.Parse(val) + l, err := units.ParseUlimit(val) if err != nil { return err } @@ -42,8 +42,8 @@ func (o *UlimitOpt) String() string { } // GetList returns a slice of pointers to Ulimits. 
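UlimitOpt.Set now defers to units.ParseUlimit from docker/go-units rather than the old pkg/ulimit parser; its "name=soft[:hard]" format is unchanged. For example:

    package main

    import (
        "fmt"

        "github.com/docker/go-units"
    )

    func main() {
        // Soft limit 1024, hard limit 2048 for open files.
        ul, err := units.ParseUlimit("nofile=1024:2048")
        if err != nil {
            panic(err)
        }
        fmt.Println(ul.Name, ul.Soft, ul.Hard) // nofile 1024 2048
    }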
-func (o *UlimitOpt) GetList() []*ulimit.Ulimit { - var ulimits []*ulimit.Ulimit +func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit for _, v := range *o.values { ulimits = append(ulimits, v) } diff --git a/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go new file mode 100644 index 00000000..bd3c0b17 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go @@ -0,0 +1,84 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/engine-api/types/blkiodev" +) + +// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. +type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) + +// ValidateWeightDevice validates that the specified string has a valid device-weight format. +func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + weight, err := strconv.ParseUint(split[1], 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + if weight > 0 && (weight < 10 || weight > 1000) { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + + return &blkiodev.WeightDevice{ + Path: split[0], + Weight: uint16(weight), + }, nil +} + +// WeightdeviceOpt defines a map of WeightDevices +type WeightdeviceOpt struct { + values []*blkiodev.WeightDevice + validator ValidatorWeightFctType +} + +// NewWeightdeviceOpt creates a new WeightdeviceOpt +func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { + values := []*blkiodev.WeightDevice{} + return WeightdeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt +func (opt *WeightdeviceOpt) Set(val string) error { + var value *blkiodev.WeightDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns WeightdeviceOpt values as a string. +func (opt *WeightdeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to WeightDevices. 
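// For illustration, a minimal usage sketch of the validator-backed option
// types above (an assumed example, not part of the vendored change):
//
//	bps := NewThrottledeviceOpt(ValidateThrottleBpsDevice)
//	_ = bps.Set("/dev/sda:1mb") // -> &blkiodev.ThrottleDevice{Path: "/dev/sda", Rate: 1048576}
//
//	weight := NewWeightdeviceOpt(ValidateWeightDevice)
//	_ = weight.Set("/dev/sda:500")  // -> &blkiodev.WeightDevice{Path: "/dev/sda", Weight: 500}
//	err := weight.Set("/dev/sda:5") // error: a non-zero weight must be between 10 and 1000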
+func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { + var weightdevice []*blkiodev.WeightDevice + for _, v := range opt.values { + weightdevice = append(weightdevice, v) + } + + return weightdevice +} diff --git a/vendor/github.com/docker/docker/runconfig/parse.go b/vendor/github.com/docker/docker/runconfig/parse.go deleted file mode 100644 index 09899e7f..00000000 --- a/vendor/github.com/docker/docker/runconfig/parse.go +++ /dev/null @@ -1,505 +0,0 @@ -package runconfig - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/nat" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/pkg/units" - "github.com/docker/docker/volume" -) - -var ( - // ErrConflictContainerNetworkAndLinks conflict between --net=container and links - ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior") - // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links - ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: --net= can't be used with links. This would result in undefined behavior") - // ErrConflictSharedNetwork conflict between private and other networks - ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") - // ErrConflictNoNetwork conflict between private and other networks - ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in --none mode") - // ErrConflictNetworkAndDNS conflict between --dns and the network mode - ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: --dns and the network mode (--net)") - // ErrConflictNetworkHostname conflict between the hostname and the network mode - ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)") - // ErrConflictHostNetworkAndLinks conflict between --net=host and links - ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior") - // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode - ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: --mac-address and the network mode (--net)") - // ErrConflictNetworkHosts conflict between add-host and the network mode - ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: --add-host and the network mode (--net)") - // ErrConflictNetworkPublishPorts conflict between the pulbish options and the network mode - ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: -p, -P, --publish-all, --publish and the network mode (--net)") - // ErrConflictNetworkExposePorts conflict between the expose option and the network mode - ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: --expose and the network mode (--expose)") -) - -// Parse parses the specified args for the specified command and generates a Config, -// a HostConfig and returns them with the specified command. -// If the specified args are not valid, it will return an error. -func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) { - var ( - // FIXME: use utils.ListOpts for attach and volumes? 
- flAttach = opts.NewListOpts(opts.ValidateAttach) - flVolumes = opts.NewListOpts(nil) - flLinks = opts.NewListOpts(opts.ValidateLink) - flEnv = opts.NewListOpts(opts.ValidateEnv) - flLabels = opts.NewListOpts(opts.ValidateEnv) - flDevices = opts.NewListOpts(opts.ValidateDevice) - - flUlimits = opts.NewUlimitOpt(nil) - - flPublish = opts.NewListOpts(nil) - flExpose = opts.NewListOpts(nil) - flDNS = opts.NewListOpts(opts.ValidateIPAddress) - flDNSSearch = opts.NewListOpts(opts.ValidateDNSSearch) - flDNSOptions = opts.NewListOpts(nil) - flExtraHosts = opts.NewListOpts(opts.ValidateExtraHost) - flVolumesFrom = opts.NewListOpts(nil) - flEnvFile = opts.NewListOpts(nil) - flCapAdd = opts.NewListOpts(nil) - flCapDrop = opts.NewListOpts(nil) - flGroupAdd = opts.NewListOpts(nil) - flSecurityOpt = opts.NewListOpts(nil) - flLabelsFile = opts.NewListOpts(nil) - flLoggingOpts = opts.NewListOpts(nil) - flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") - flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") - flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use") - flUTSMode = cmd.String([]string{"-uts"}, "", "UTS namespace to use") - flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports") - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flOomKillDisable = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer") - flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") - flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") - flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") - flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit") - flMemoryReservation = cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") - flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") - flKernelMemory = cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])") - flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") - flCPUShares = cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flCPUPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flCPUQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") - flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") - flBlkioWeight = cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") - flSwappiness = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tuning container memory swappiness (0 to 100)") - flNetMode = cmd.String([]string{"-net"}, "default", "Set the Network for the container") - flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)") - flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") - flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") - flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") - flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") - flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") - flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") - flStopSignal = cmd.String([]string{"-stop-signal"}, signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) - flIsolation = cmd.String([]string{"-isolation"}, "", "Container isolation level") - ) - - cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR") - cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume") - cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container") - cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container") - cmd.Var(&flLabels, []string{"l", "-label"}, "Set meta data on a container") - cmd.Var(&flLabelsFile, []string{"-label-file"}, "Read in a line delimited file of labels") - cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") - cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a file of environment variables") - cmd.Var(&flPublish, []string{"p", "-publish"}, "Publish a container's port(s) to the host") - cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port or a range of ports") - cmd.Var(&flDNS, []string{"#dns", "-dns"}, "Set custom DNS servers") - cmd.Var(&flDNSSearch, []string{"-dns-search"}, "Set custom DNS search domains") - cmd.Var(&flDNSOptions, []string{"-dns-opt"}, "Set DNS options") - cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") - cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") - cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") - cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join") - cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") - cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") - cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options") - - cmd.Require(flag.Min, 1) - - if err := cmd.ParseFlags(args, true); err != nil { - return nil, nil, cmd, err - } - - var ( - attachStdin = flAttach.Get("stdin") - attachStdout = flAttach.Get("stdout") - attachStderr = flAttach.Get("stderr") - ) - - // Validate the input mac address - if *flMacAddress != "" { - if _, err := opts.ValidateMACAddress(*flMacAddress); err != nil { - return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress) - } - } - if *flStdin { - attachStdin = true - } - // If -a is not set attach to the output stdio - if flAttach.Len() == 0 { - attachStdout = true - attachStderr = true - } - - var err error - - var flMemory int64 - if *flMemoryString != "" { - flMemory, err = units.RAMInBytes(*flMemoryString) - if err != nil { - return nil, nil, cmd, err - } - } - - var MemoryReservation int64 - if *flMemoryReservation != "" { - MemoryReservation, err = units.RAMInBytes(*flMemoryReservation) - if err != nil { - return nil, nil, cmd, err - } - 
} - - var memorySwap int64 - if *flMemorySwap != "" { - if *flMemorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(*flMemorySwap) - if err != nil { - return nil, nil, cmd, err - } - } - } - - var KernelMemory int64 - if *flKernelMemory != "" { - KernelMemory, err = units.RAMInBytes(*flKernelMemory) - if err != nil { - return nil, nil, cmd, err - } - } - - swappiness := *flSwappiness - if swappiness != -1 && (swappiness < 0 || swappiness > 100) { - return nil, nil, cmd, fmt.Errorf("Invalid value: %d. Valid memory swappiness range is 0-100", swappiness) - } - - var binds []string - // add any bind targets to the list of container volumes - for bind := range flVolumes.GetMap() { - if arr := volume.SplitN(bind, 2); len(arr) > 1 { - // after creating the bind mount we want to delete it from the flVolumes values because - // we do not want bind mounts being committed to image configs - binds = append(binds, bind) - flVolumes.Delete(bind) - } - } - - var ( - parsedArgs = cmd.Args() - runCmd *stringutils.StrSlice - entrypoint *stringutils.StrSlice - image = cmd.Arg(0) - ) - if len(parsedArgs) > 1 { - runCmd = stringutils.NewStrSlice(parsedArgs[1:]...) - } - if *flEntrypoint != "" { - entrypoint = stringutils.NewStrSlice(*flEntrypoint) - } - - var ( - domainname string - hostname = *flHostname - parts = strings.SplitN(hostname, ".", 2) - ) - if len(parts) > 1 { - hostname = parts[0] - domainname = parts[1] - } - - ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) - if err != nil { - return nil, nil, cmd, err - } - - // Merge in exposed ports to the map of published ports - for _, e := range flExpose.GetAll() { - if strings.Contains(e, ":") { - return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) - } - //support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>] - proto, port := nat.SplitProtoPort(e) - //parse the start and end port and create a sequence of ports to expose - //if expose a port, the start and end port are the same - start, end, err := parsers.ParsePortRange(port) - if err != nil { - return nil, nil, cmd, fmt.Errorf("Invalid range format for --expose: %s, error: %s", e, err) - } - for i := start; i <= end; i++ { - p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) - if err != nil { - return nil, nil, cmd, err - } - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} - } - } - } - - // parse device mappings - deviceMappings := []DeviceMapping{} - for _, device := range flDevices.GetAll() { - deviceMapping, err := ParseDevice(device) - if err != nil { - return nil, nil, cmd, err - } - deviceMappings = append(deviceMappings, deviceMapping) - } - - // collect all the environment variables for the container - envVariables, err := readKVStrings(flEnvFile.GetAll(), flEnv.GetAll()) - if err != nil { - return nil, nil, cmd, err - } - - // collect all the labels for the container - labels, err := readKVStrings(flLabelsFile.GetAll(), flLabels.GetAll()) - if err != nil { - return nil, nil, cmd, err - } - - ipcMode := IpcMode(*flIpcMode) - if !ipcMode.Valid() { - return nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode") - } - - pidMode := PidMode(*flPidMode) - if !pidMode.Valid() { - return nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode") - } - - utsMode := UTSMode(*flUTSMode) - if !utsMode.Valid() { - return nil, nil, cmd, fmt.Errorf("--uts: invalid UTS mode") - } - - restartPolicy, err := ParseRestartPolicy(*flRestartPolicy) - if err != nil { - return nil, nil, cmd, err - } - - loggingOpts, err := 
parseLoggingOpts(*flLoggingDriver, flLoggingOpts.GetAll()) - if err != nil { - return nil, nil, cmd, err - } - - config := &Config{ - Hostname: hostname, - Domainname: domainname, - ExposedPorts: ports, - User: *flUser, - Tty: *flTty, - NetworkDisabled: !*flNetwork, - OpenStdin: *flStdin, - AttachStdin: attachStdin, - AttachStdout: attachStdout, - AttachStderr: attachStderr, - Env: envVariables, - Cmd: runCmd, - Image: image, - Volumes: flVolumes.GetMap(), - MacAddress: *flMacAddress, - Entrypoint: entrypoint, - WorkingDir: *flWorkingDir, - Labels: ConvertKVStringsToMap(labels), - StopSignal: *flStopSignal, - } - - hostConfig := &HostConfig{ - Binds: binds, - ContainerIDFile: *flContainerIDFile, - Memory: flMemory, - MemoryReservation: MemoryReservation, - MemorySwap: memorySwap, - KernelMemory: KernelMemory, - CPUShares: *flCPUShares, - CPUPeriod: *flCPUPeriod, - CpusetCpus: *flCpusetCpus, - CpusetMems: *flCpusetMems, - CPUQuota: *flCPUQuota, - BlkioWeight: *flBlkioWeight, - OomKillDisable: *flOomKillDisable, - MemorySwappiness: flSwappiness, - Privileged: *flPrivileged, - PortBindings: portBindings, - Links: flLinks.GetAll(), - PublishAllPorts: *flPublishAll, - DNS: flDNS.GetAll(), - DNSSearch: flDNSSearch.GetAll(), - DNSOptions: flDNSOptions.GetAll(), - ExtraHosts: flExtraHosts.GetAll(), - VolumesFrom: flVolumesFrom.GetAll(), - NetworkMode: NetworkMode(*flNetMode), - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - Devices: deviceMappings, - CapAdd: stringutils.NewStrSlice(flCapAdd.GetAll()...), - CapDrop: stringutils.NewStrSlice(flCapDrop.GetAll()...), - GroupAdd: flGroupAdd.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: flSecurityOpt.GetAll(), - ReadonlyRootfs: *flReadonlyRootfs, - Ulimits: flUlimits.GetList(), - LogConfig: LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, - CgroupParent: *flCgroupParent, - VolumeDriver: *flVolumeDriver, - Isolation: IsolationLevel(*flIsolation), - } - - // When allocating stdin in attached mode, close stdin at client disconnect - if config.OpenStdin && config.AttachStdin { - config.StdinOnce = true - } - return config, hostConfig, cmd, nil -} - -// reads a file of line terminated key=value pairs and override that with override parameter -func readKVStrings(files []string, override []string) ([]string, error) { - envVariables := []string{} - for _, ef := range files { - parsedVars, err := opts.ParseEnvFile(ef) - if err != nil { - return nil, err - } - envVariables = append(envVariables, parsedVars...) - } - // parse the '-e' and '--env' after, to allow override - envVariables = append(envVariables, override...) 
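// For illustration (an assumed example, not part of the vendored change):
// because the file contents are appended before the explicit flags, the flag
// value wins once the slice is reduced to a map or applied in order, e.g.
//
//	// valid.env contains: ENV1=from-file
//	vars, _ := readKVStrings([]string{"valid.env"}, []string{"ENV1=from-flag"})
//	// vars == []string{"ENV1=from-file", "ENV1=from-flag"}; the later entry overrides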
- - return envVariables, nil -} - -// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} -func ConvertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} - -func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { - loggingOptsMap := ConvertKVStringsToMap(loggingOpts) - if loggingDriver == "none" && len(loggingOpts) > 0 { - return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver) - } - return loggingOptsMap, nil -} - -// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func ParseRestartPolicy(policy string) (RestartPolicy, error) { - p := RestartPolicy{} - - if policy == "" { - return p, nil - } - - var ( - parts = strings.Split(policy, ":") - name = parts[0] - ) - - p.Name = name - switch name { - case "always", "unless-stopped": - if len(parts) > 1 { - return p, fmt.Errorf("maximum restart count not valid with restart policy of \"%s\"", name) - } - case "no": - // do nothing - case "on-failure": - if len(parts) > 2 { - return p, fmt.Errorf("restart count format is not valid, usage: 'on-failure:N' or 'on-failure'") - } - if len(parts) == 2 { - count, err := strconv.Atoi(parts[1]) - if err != nil { - return p, err - } - - p.MaximumRetryCount = count - } - default: - return p, fmt.Errorf("invalid restart policy %s", name) - } - - return p, nil -} - -func parseKeyValueOpts(opts opts.ListOpts) ([]KeyValuePair, error) { - out := make([]KeyValuePair, opts.Len()) - for i, o := range opts.GetAll() { - k, v, err := parsers.ParseKeyValueOpt(o) - if err != nil { - return nil, err - } - out[i] = KeyValuePair{Key: k, Value: v} - } - return out, nil -} - -// ParseDevice parses a device mapping string to a DeviceMapping struct -func ParseDevice(device string) (DeviceMapping, error) { - src := "" - dst := "" - permissions := "rwm" - arr := strings.Split(device, ":") - switch len(arr) { - case 3: - permissions = arr[2] - fallthrough - case 2: - if opts.ValidDeviceMode(arr[1]) { - permissions = arr[1] - } else { - dst = arr[1] - } - fallthrough - case 1: - src = arr[0] - default: - return DeviceMapping{}, fmt.Errorf("Invalid device specification: %s", device) - } - - if dst == "" { - dst = src - } - - deviceMapping := DeviceMapping{ - PathOnHost: src, - PathInContainer: dst, - CgroupPermissions: permissions, - } - return deviceMapping, nil -} diff --git a/vendor/github.com/docker/docker/runconfig/parse_test.go b/vendor/github.com/docker/docker/runconfig/parse_test.go deleted file mode 100644 index d2406b09..00000000 --- a/vendor/github.com/docker/docker/runconfig/parse_test.go +++ /dev/null @@ -1,639 +0,0 @@ -package runconfig - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - "testing" - - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/nat" -) - -func parseRun(args []string) (*Config, *HostConfig, *flag.FlagSet, error) { - cmd := flag.NewFlagSet("run", flag.ContinueOnError) - cmd.SetOutput(ioutil.Discard) - cmd.Usage = nil - return Parse(cmd, args) -} - -func parse(t *testing.T, args string) (*Config, *HostConfig, error) { - config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) - return config, hostConfig, err -} - -func mustParse(t *testing.T, args string) (*Config, 
*HostConfig) { - config, hostConfig, err := parse(t, args) - if err != nil { - t.Fatal(err) - } - return config, hostConfig -} - -func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { - t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) - } -} - -func TestParseRunAttach(t *testing.T) { - if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - - if _, _, err := parse(t, "-a"); err == nil { - t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid"); err == nil { - t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdin -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-d --rm"); err == nil { - t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") - } -} - -func TestParseRunVolumes(t *testing.T) { - - // A single volume - arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) - } - - // Two volumes - arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) - } - - // A single bind-mount - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) - } - - // Two bind-mounts. - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Two bind-mounts, first read-only, second read-write. - // TODO Windows: The Windows version uses read-write as that's the only mode it supports. 
Can change this post TP4 - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Similar to previous test but with alternate modes which are only supported by Linux - if runtime.GOOS != "windows" { - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - } - - // One bind mount and one volume - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) - } - - // Root to non-c: drive letter (Windows specific) - if runtime.GOOS == "windows" { - arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { - t.Fatalf("Error parsing %s. 
Should have a single bind mount and no volumes", arr[0]) - } - } - -} - -// This tests the cases for binds which are generated through -// DecodeContainerConfig rather than Parse() -func TestDecodeContainerConfigVolumes(t *testing.T) { - - // Root to root - bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // No destination path - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // // No destination path or mode - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A whole lot of nothing - bindsOrVols = []string{`:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A whole lot of nothing with no mode - bindsOrVols = []string{`::`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Too much including an invalid mode - wTmp := os.Getenv("TEMP") - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Windows specific error tests - if runtime.GOOS == "windows" { - // Volume which does not include a drive letter - bindsOrVols = []string{`\tmp`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Root to C-Drive - bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Container path that does not include a drive letter - bindsOrVols = []string{`c:\windows:\somewhere`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should 
have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - } - - // Linux-specific error tests - if runtime.GOOS != "windows" { - // Just root - bindsOrVols = []string{`/`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A single volume that looks like a bind mount passed in Volumes. - // This should be handled as a bind mount, not a volume. - vols := []string{`/foo:/bar`} - if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { - t.Fatal("Volume /foo:/bar should have succeeded as a volume name") - } else if hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes[vols[0]]; !exists { - t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) - } - - } -} - -// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes -// to call DecodeContainerConfig. It effectively does what a client would -// do when calling the daemon by constructing a JSON stream of a -// ContainerConfigWrapper which is populated by the set of volume specs -// passed into it. It returns a config and a hostconfig which can be -// validated to ensure DecodeContainerConfig has manipulated the structures -// correctly. -func callDecodeContainerConfig(volumes []string, binds []string) (*Config, *HostConfig, error) { - var ( - b []byte - err error - c *Config - h *HostConfig - ) - w := ContainerConfigWrapper{ - Config: &Config{ - Volumes: map[string]struct{}{}, - }, - HostConfig: &HostConfig{ - NetworkMode: "none", - Binds: binds, - }, - } - for _, v := range volumes { - w.Config.Volumes[v] = struct{}{} - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) - } - c, h, err = DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) - } - if c == nil || h == nil { - return nil, nil, fmt.Errorf("Empty config or hostconfig") - } - - return c, h, err -} - -// check if (a == c && b == d) || (a == d && b == c) -// because maps are randomized -func compareRandomizedStrings(a, b, c, d string) error { - if a == c && b == d { - return nil - } - if a == d && b == c { - return nil - } - return fmt.Errorf("strings don't match") -} - -// setupPlatformVolume takes two arrays of volume specs - a Unix style -// spec and a Windows style spec. Depending on the platform being unit tested, -// it returns one of them, along with a volume string that would be passed -// on the docker CLI (eg -v /bar -v /foo). 
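// For illustration (an assumed example, not part of the vendored change),
// on Linux the helper below would return:
//
//	specs, cli := setupPlatformVolume([]string{"/foo", "/bar:/baz"}, []string{`c:\foo`})
//	// specs == []string{"/foo", "/bar:/baz"}
//	// cli   == "-v /foo -v /bar:/baz " (note the trailing space)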
-func setupPlatformVolume(u []string, w []string) ([]string, string) { - var a []string - if runtime.GOOS == "windows" { - a = w - } else { - a = u - } - s := "" - for _, v := range a { - s = s + "-v " + v + " " - } - return a, s -} - -// Simple parse with MacAddress validatation -func TestParseWithMacAddress(t *testing.T) { - invalidMacAddress := "--mac-address=invalidMacAddress" - validMacAddress := "--mac-address=92:d0:c6:0a:29:33" - if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { - t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) - } - if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { - t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) - } -} - -func TestParseWithMemory(t *testing.T) { - invalidMemory := "--memory=invalid" - validMemory := "--memory=1G" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { - t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) - } -} - -func TestParseWithMemorySwap(t *testing.T) { - invalidMemory := "--memory-swap=invalid" - validMemory := "--memory-swap=1G" - anotherValidMemory := "--memory-swap=-1" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { - t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } - if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 { - t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } -} - -func TestParseHostname(t *testing.T) { - hostname := "--hostname=hostname" - hostnameWithDomain := "--hostname=hostname.domainname" - hostnameWithDomainTld := "--hostname=hostname.domainname.tld" - if config, _ := mustParse(t, hostname); config.Hostname != "hostname" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) - } - if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname" && config.Domainname != "domainname" { - t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) - } - if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname" && config.Domainname != "domainname.tld" { - t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) - } -} - -func TestParseWithExpose(t *testing.T) { - invalids := map[string]string{ - ":": "Invalid port format for --expose: :", - "8080:9090": "Invalid port format for --expose: 8080:9090", - "/tcp": "Invalid range format for --expose: /tcp, error: Empty string specified for ports.", - "/udp": "Invalid range format for --expose: /udp, error: Empty string specified for ports.", - "NaN/tcp": `Invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "NaN-NaN/tcp": `Invalid range format for --expose: NaN-NaN/tcp, error: 
strconv.ParseUint: parsing "NaN": invalid syntax`, - "8080-NaN/tcp": `Invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "1234567890-8080/tcp": `Invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, - } - valids := map[string][]nat.Port{ - "8080/tcp": {"8080/tcp"}, - "8080/udp": {"8080/udp"}, - "8080/ncp": {"8080/ncp"}, - "8080-8080/udp": {"8080/udp"}, - "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, - } - for expose, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) - } - } - for expose, exposedPorts := range valids { - config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != len(exposedPorts) { - t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) - } - for _, port := range exposedPorts { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) - } - } - } - // Merge with actual published port - config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != 2 { - t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) - } - ports := []nat.Port{"80/tcp", "81/tcp"} - for _, port := range ports { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) - } - } -} - -func TestParseDevice(t *testing.T) { - valids := map[string]DeviceMapping{ - "/dev/snd": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rwm", - }, - "/dev/snd:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rw", - }, - "/dev/snd:/something": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rwm", - }, - "/dev/snd:/something:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rw", - }, - } - for device, deviceMapping := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(hostconfig.Devices) != 1 { - t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) - } - if hostconfig.Devices[0] != deviceMapping { - t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) - } - } - -} - -func TestParseModes(t *testing.T) { - // ipc ko - if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { - t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) - } - // ipc ok - _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.IpcMode.Valid() { - t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) - } - // pid ko - if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { - t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) - } - // pid ok - _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) - if 
err != nil { - t.Fatal(err) - } - if !hostconfig.PidMode.Valid() { - t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) - } - // uts ko - if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { - t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) - } - // uts ok - _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.UTSMode.Valid() { - t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) - } -} - -func TestParseRestartPolicy(t *testing.T) { - invalids := map[string]string{ - "something": "invalid restart policy something", - "always:2": "maximum restart count not valid with restart policy of \"always\"", - "always:2:3": "maximum restart count not valid with restart policy of \"always\"", - "on-failure:invalid": `strconv.ParseInt: parsing "invalid": invalid syntax`, - "on-failure:2:5": "restart count format is not valid, usage: 'on-failure:N' or 'on-failure'", - } - valids := map[string]RestartPolicy{ - "": {}, - "always": { - Name: "always", - MaximumRetryCount: 0, - }, - "on-failure:1": { - Name: "on-failure", - MaximumRetryCount: 1, - }, - } - for restart, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) - } - } - for restart, expected := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.RestartPolicy != expected { - t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) - } - } -} - -func TestParseLoggingOpts(t *testing.T) { - // logging opts ko - if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "Invalid logging opts for driver none" { - t.Fatalf("Expected an error with message 'Invalid logging opts for driver none', got %v", err) - } - // logging opts ok - _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { - t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) - } -} - -func TestParseEnvfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." 
- } - // env ko - if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // env ok - config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { - t.Fatalf("Expected a a config with [ENV1=value1], got %v", config.Env) - } - config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { - t.Fatalf("Expected a a config with [ENV1=value1 ENV2=value2], got %v", config.Env) - } -} - -func TestParseLabelfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." - } - // label ko - if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // label ok - config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { - t.Fatalf("Expected a a config with [LABEL1:value1], got %v", config.Labels) - } - config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { - t.Fatalf("Expected a a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) - } -} - -func TestParseEntryPoint(t *testing.T) { - config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) - if err != nil { - t.Fatal(err) - } - if config.Entrypoint.Len() != 1 && config.Entrypoint.Slice()[0] != "anything" { - t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) - } -} diff --git a/vendor/github.com/docker/docker/runconfig/streams.go b/vendor/github.com/docker/docker/runconfig/streams.go new file mode 100644 index 00000000..548c7826 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/streams.go @@ -0,0 +1,109 @@ +package runconfig + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" +) + +// StreamConfig holds information about I/O streams managed together. +// +// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// streamConfig.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type StreamConfig struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewStreamConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. 
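// For illustration, a minimal sketch of the broadcaster pattern described
// above (an assumed example, not part of the vendored change): each
// StdoutPipe consumer receives its own copy of what is written to Stdout().
//
//	sc := NewStreamConfig()
//	r1 := sc.StdoutPipe() // first consumer
//	r2 := sc.StdoutPipe() // second consumer
//	fmt.Fprintln(sc.Stdout(), "hello") // delivered to both r1 and r2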
+func NewStreamConfig() *StreamConfig { + return &StreamConfig{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (streamConfig *StreamConfig) Stdout() *broadcaster.Unbuffered { + return streamConfig.stdout +} + +// Stderr returns the standard error in the configuration. +func (streamConfig *StreamConfig) Stderr() *broadcaster.Unbuffered { + return streamConfig.stderr +} + +// Stdin returns the standard input in the configuration. +func (streamConfig *StreamConfig) Stdin() io.ReadCloser { + return streamConfig.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { + return streamConfig.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe(nil) + streamConfig.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe(nil) + streamConfig.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (streamConfig *StreamConfig) NewInputPipes() { + streamConfig.stdin, streamConfig.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (streamConfig *StreamConfig) NewNopInputPipe() { + streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. 
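// For illustration (an assumed example, not part of the vendored change):
// a caller that keeps stdin open wires up real pipes, otherwise it installs
// the no-op writer so input is silently dropped.
//
//	if openStdin { // openStdin is a hypothetical caller-side flag
//		sc.NewInputPipes() // Stdin() and StdinPipe() are connected via io.Pipe
//	} else {
//		sc.NewNopInputPipe() // StdinPipe() discards everything written to it
//	}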
+func (streamConfig *StreamConfig) CloseStreams() error { + var errors []string + + if streamConfig.stdin != nil { + if err := streamConfig.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := streamConfig.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := streamConfig.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/trash.conf b/vendor/github.com/docker/docker/trash.conf new file mode 100644 index 00000000..d0387800 --- /dev/null +++ b/vendor/github.com/docker/docker/trash.conf @@ -0,0 +1,66 @@ +github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 +github.com/Microsoft/go-winio v0.1.0 +github.com/Microsoft/hcsshim v0.1.0 +github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837 +github.com/Sirupsen/logrus v0.9.0 +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/aws/aws-sdk-go v0.9.9 +github.com/boltdb/bolt v1.2.0 +github.com/cloudfoundry/gosigar 3ed7c74352dae6dc00bdc8c74045375352e3ec05 +github.com/codegangsta/cli 9fec0fad02befc9209347cc6d620e68e1b45f74d +github.com/coreos/etcd v2.2.0 +github.com/coreos/go-systemd v4 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/docker/containerd 8c538d6b92f2c512c3ad8a854826461ac9095b74 https://github.com/ibuildthecloud/containerd.git +github.com/docker/distribution 467fc068d88aa6610691b7f1a677271a3fac4aac +github.com/docker/engine-api v0.3.3 +github.com/docker/go v1.5.1-1-1-gbaf439e +github.com/docker/go-connections v0.2.0 +github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 +github.com/docker/libkv c2aac5dbbaa5c872211edea7c0f32b3bd67e7410 +github.com/docker/libnetwork v0.7.2-rc.1 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/docker/notary docker-v1.11-3 +github.com/fluent/fluent-logger-golang v1.1.0 +github.com/go-check/check a625211d932a2a643d0d17352095f03fb7774663 https://github.com/cpuguy83/check.git +github.com/godbus/dbus v3 +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/context 14f550f51a +github.com/gorilla/mux e444e69cbd +github.com/hashicorp/consul v0.5.2 +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4 +github.com/hashicorp/serf 7151adcef72687bf95f451a2e0ba15cb19412bf2 +github.com/imdario/mergo 0.2.1 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.0 +github.com/mattn/go-sqlite3 v1.1.0 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 +github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f +github.com/mistifyio/go-zfs v2.1.1 +github.com/opencontainers/runc edc34c4a8c1e261b5ce926ff557ecde1aff19ce3 https://github.com/ibuildthecloud/runc.git +github.com/opencontainers/runtime-spec f955d90e70a98ddfb886bd930ffd076da9b67998 +github.com/opencontainers/specs f955d90e70a98ddfb886bd930ffd076da9b67998 +github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa +github.com/rcrowley/go-metrics 
eeba7bd0dd01ace6e690fa833b3f22aaec29af43 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/tchap/go-patricia v2.1.0 +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c +github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec +github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1 +github.com/vbatts/tar-split v0.9.11 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +github.com/vishvananda/netlink 631962935bff4f3d20ff32a72e8944f6d2836a26 +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +golang.org/x/net 991d3e32f76f19ee6d9caadb3a22eae8d23315f7 https://github.com/golang/net.git +golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be https://github.com/golang/oauth2.git +golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git +google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 https://code.googlesource.com/google-api-go-client +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud +google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git +gopkg.in/fsnotify.v1 v1.2.0 diff --git a/vendor/github.com/docker/docker/utils/debug.go b/vendor/github.com/docker/docker/utils/debug.go new file mode 100644 index 00000000..d2038911 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/debug.go @@ -0,0 +1,26 @@ +package utils + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// EnableDebug sets the DEBUG env var to true +// and makes the logger to log at debug level. +func EnableDebug() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// DisableDebug sets the DEBUG env var to false +// and makes the logger to log at info level. +func DisableDebug() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsDebugEnabled checks whether the debug flag is set or not. 
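// For illustration (an assumed example, not part of the vendored change):
//
//	utils.EnableDebug()
//	fmt.Println(utils.IsDebugEnabled()) // true: DEBUG=1 and logrus at debug level
//	utils.DisableDebug()
//	fmt.Println(utils.IsDebugEnabled()) // false: DEBUG="" reads as unset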
+func IsDebugEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/docker/docker/utils/git_test.go b/vendor/github.com/docker/docker/utils/git_test.go deleted file mode 100644 index e9eb5956..00000000 --- a/vendor/github.com/docker/docker/utils/git_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package utils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "reflect" - "testing" -) - -func TestCloneArgsSmartHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - q := r.URL.Query().Get("service") - w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsDumbHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsGit(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsStripFragment(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker#test") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCheckoutGit(t *testing.T) { - root, err := ioutil.TempDir("", "docker-build-git-checkout") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - gitDir := filepath.Join(root, "repo") - _, err = git("init", gitDir) - if err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { - t.Fatal(err) - } - - subDir := filepath.Join(gitDir, "subdir") - if err = os.Mkdir(subDir, 0755); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { - t.Fatal(err) - } - - if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { - t.Fatal(err) - 
} - - if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { - t.Fatal(err) - } - - cases := []struct { - frag string - exp string - fail bool - }{ - {"", "FROM scratch", false}, - {"master", "FROM scratch", false}, - {":subdir", "FROM scratch\nEXPOSE 5000", false}, - {":nosubdir", "", true}, // missing directory error - {":Dockerfile", "", true}, // not a directory error - {"master:nosubdir", "", true}, - {"master:subdir", "FROM scratch\nEXPOSE 5000", false}, - {"master:parentlink", "FROM scratch\nEXPOSE 5000", false}, - {"master:absolutelink", "FROM scratch\nEXPOSE 5000", false}, - {"master:../subdir", "", true}, - {"test", "FROM scratch\nEXPOSE 3000", false}, - {"test:", "FROM scratch\nEXPOSE 3000", false}, - {"test:subdir", "FROM busybox\nEXPOSE 5000", false}, - } - - for _, c := range cases { - r, err := checkoutGit(c.frag, gitDir) - - fail := err != nil - if fail != c.fail { - t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) - } - if c.fail { - continue - } - - b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) - if err != nil { - t.Fatal(err) - } - - if string(b) != c.exp { - t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) - } - } -} diff --git a/vendor/github.com/docker/docker/utils/names.go b/vendor/github.com/docker/docker/utils/names.go index e09e569b..8239c0de 100644 --- a/vendor/github.com/docker/docker/utils/names.go +++ b/vendor/github.com/docker/docker/utils/names.go @@ -7,3 +7,6 @@ const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` // RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. var RestrictedNamePattern = regexp.MustCompile(`^/?` + RestrictedNameChars + `+$`) + +// RestrictedVolumeNamePattern is a regular expression to validate volume names against the collection of restricted characters. +var RestrictedVolumeNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/docker/docker/utils/process_unix.go b/vendor/github.com/docker/docker/utils/process_unix.go new file mode 100644 index 00000000..bdb1b46b --- /dev/null +++ b/vendor/github.com/docker/docker/utils/process_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package utils + +import ( + "syscall" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := syscall.Kill(pid, syscall.Signal(0)) + if err == nil || err == syscall.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. 
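+// A minimal sketch of the intended check-then-kill pattern (illustrative only): +// +// if IsProcessAlive(pid) { +// KillProcess(pid) +// }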
+func KillProcess(pid int) { + syscall.Kill(pid, syscall.SIGKILL) +} diff --git a/vendor/github.com/docker/docker/utils/templates/templates.go b/vendor/github.com/docker/docker/utils/templates/templates.go new file mode 100644 index 00000000..749da3d5 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/templates/templates.go @@ -0,0 +1,33 @@ +package templates + +import ( + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. +var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, +} + +// Parse creates a new anonymous template with the basic functions +// and parses the given format. +func Parse(format string) (*template.Template, error) { + return NewParse("", format) +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} diff --git a/vendor/github.com/docker/docker/utils/timeout.go b/vendor/github.com/docker/docker/utils/timeout.go deleted file mode 100644 index 85d2665c..00000000 --- a/vendor/github.com/docker/docker/utils/timeout.go +++ /dev/null @@ -1,21 +0,0 @@ -package utils - -import ( - "net" - "net/url" -) - -// IsTimeout takes an error returned from (generally) the http package and determines if it is a timeout error. -func IsTimeout(err error) bool { - switch e := err.(type) { - case net.Error: - return e.Timeout() - case *url.Error: - if t, ok := e.Err.(net.Error); ok { - return t.Timeout() - } - return false - default: - return false - } -} diff --git a/vendor/github.com/docker/docker/utils/utils.go b/vendor/github.com/docker/docker/utils/utils.go index d2e83f61..d3dd00ab 100644 --- a/vendor/github.com/docker/docker/utils/utils.go +++ b/vendor/github.com/docker/docker/utils/utils.go @@ -1,126 +1,16 @@ package utils import ( - "bufio" - "crypto/sha1" - "encoding/hex" "fmt" - "io" "io/ioutil" "os" - "os/exec" - "path/filepath" "runtime" "strings" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/stringid" ) -// SelfPath figures out the absolute path of our own binary (if it's still around).
-func SelfPath() string { - path, err := exec.LookPath(os.Args[0]) - if err != nil { - if os.IsNotExist(err) { - return "" - } - if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { - return "" - } - panic(err) - } - path, err = filepath.Abs(path) - if err != nil { - if os.IsNotExist(err) { - return "" - } - panic(err) - } - return path -} - -func dockerInitSha1(target string) string { - f, err := os.Open(target) - if err != nil { - return "" - } - defer f.Close() - h := sha1.New() - _, err = io.Copy(h, f) - if err != nil { - return "" - } - return hex.EncodeToString(h.Sum(nil)) -} - -func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) - if target == "" { - return false - } - if dockerversion.IAmStatic == "true" { - if selfPath == "" { - return false - } - if target == selfPath { - return true - } - targetFileInfo, err := os.Lstat(target) - if err != nil { - return false - } - selfPathFileInfo, err := os.Lstat(selfPath) - if err != nil { - return false - } - return os.SameFile(targetFileInfo, selfPathFileInfo) - } - return dockerversion.InitSHA1 != "" && dockerInitSha1(target) == dockerversion.InitSHA1 -} - -// DockerInitPath figures out the path of our dockerinit (which may be SelfPath()) -func DockerInitPath(localCopy string) string { - selfPath := SelfPath() - if isValidDockerInitPath(selfPath, selfPath) { - // if we're valid, don't bother checking anything else - return selfPath - } - var possibleInits = []string{ - localCopy, - dockerversion.InitPath, - filepath.Join(filepath.Dir(selfPath), "dockerinit"), - - // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." - // https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec - "/usr/libexec/docker/dockerinit", - "/usr/local/libexec/docker/dockerinit", - - // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." - // https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA - "/usr/lib/docker/dockerinit", - "/usr/local/lib/docker/dockerinit", - } - for _, dockerInit := range possibleInits { - if dockerInit == "" { - continue - } - path, err := exec.LookPath(dockerInit) - if err == nil { - path, err = filepath.Abs(path) - if err != nil { - // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? - panic(err) - } - if isValidDockerInitPath(path, selfPath) { - return path - } - } - } - return "" -} - var globalTestID string // TestDirectory creates a new temporary directory and returns its path. 
@@ -195,112 +85,3 @@ func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { return defaults } - -// ValidateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read -// symlinks which point to non-existing files don't trigger an error -func ValidateContextDirectory(srcPath string, excludes []string) error { - contextRoot, err := getContextRoot(srcPath) - if err != nil { - return err - } - return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hanging on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -// ReadDockerIgnore reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use GO's "clean" func to get the shortest/cleanest path for each. -func ReadDockerIgnore(reader io.ReadCloser) ([]string, error) { - if reader == nil { - return nil, nil - } - defer reader.Close() - scanner := bufio.NewScanner(reader) - var excludes []string - - for scanner.Scan() { - pattern := strings.TrimSpace(scanner.Text()) - if pattern == "" { - continue - } - pattern = filepath.Clean(pattern) - excludes = append(excludes, pattern) - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading .dockerignore: %v", err) - } - return excludes, nil -} - -// ImageReference combines `repo` and `ref` and returns a string representing -// the combination. If `ref` is a digest (meaning it's of the form -// <algorithm>:<digest>, the returned string is <repo>@<ref>. Otherwise, -// ref is assumed to be a tag, and the returned string is <repo>:<ref>. -func ImageReference(repo, ref string) string { - if DigestReference(ref) { - return repo + "@" + ref - } - return repo + ":" + ref -} - -// DigestReference returns true if ref is a digest reference; i.e. if it -// is of the form <algorithm>:<digest>. -func DigestReference(ref string) bool { - return strings.Contains(ref, ":") -} - -// GetErrorMessage returns the human readable message associated with -// the passed-in error. In some cases the default Error() func returns -// something that is less than useful so based on its types this func -// will go and get a better piece of text.
-func GetErrorMessage(err error) string { - switch err.(type) { - case errcode.Error: - e, _ := err.(errcode.Error) - return e.Message - - case errcode.ErrorCode: - ec, _ := err.(errcode.ErrorCode) - return ec.Message() - - default: - return err.Error() - } -} diff --git a/vendor/github.com/docker/docker/utils/utils_test.go b/vendor/github.com/docker/docker/utils/utils_test.go deleted file mode 100644 index 9acb8017..00000000 --- a/vendor/github.com/docker/docker/utils/utils_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package utils - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestReplaceAndAppendEnvVars(t *testing.T) { - var ( - d = []string{"HOME=/"} - o = []string{"HOME=/root", "TERM=xterm"} - ) - - env := ReplaceOrAppendEnvValues(d, o) - if len(env) != 2 { - t.Fatalf("expected len of 2 got %d", len(env)) - } - if env[0] != "HOME=/root" { - t.Fatalf("expected HOME=/root got '%s'", env[0]) - } - if env[1] != "TERM=xterm" { - t.Fatalf("expected TERM=xterm got '%s'", env[1]) - } -} - -func TestImageReference(t *testing.T) { - tests := []struct { - repo string - ref string - expected string - }{ - {"repo", "tag", "repo:tag"}, - {"repo", "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64", "repo@sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64"}, - } - - for i, test := range tests { - actual := ImageReference(test.repo, test.ref) - if test.expected != actual { - t.Errorf("%d: expected %q, got %q", i, test.expected, actual) - } - } -} - -func TestDigestReference(t *testing.T) { - input := "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64" - if !DigestReference(input) { - t.Errorf("Expected DigestReference=true for input %q", input) - } - - input = "latest" - if DigestReference(input) { - t.Errorf("Unexpected DigestReference=true for input %q", input) - } -} - -func TestReadDockerIgnore(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "dockerignore-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - di, err := ReadDockerIgnore(nil) - if err != nil { - t.Fatalf("Expected not to have error, got %v", err) - } - - if diLen := len(di); diLen != 0 { - t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) - } - - diName := filepath.Join(tmpDir, ".dockerignore") - content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") - err = ioutil.WriteFile(diName, []byte(content), 0777) - if err != nil { - t.Fatal(err) - } - - diFd, err := os.Open(diName) - if err != nil { - t.Fatal(err) - } - di, err = ReadDockerIgnore(diFd) - if err != nil { - t.Fatal(err) - } - - if di[0] != "test1" { - t.Fatalf("First element is not test1") - } - if di[1] != "/test2" { - t.Fatalf("Second element is not /test2") - } - if di[2] != "/a/file/here" { - t.Fatalf("Third element is not /a/file/here") - } - if di[3] != "lastfile" { - t.Fatalf("Fourth element is not lastfile") - } -} diff --git a/vendor/github.com/docker/docker/utils/utils_windows.go b/vendor/github.com/docker/docker/utils/utils_windows.go deleted file mode 100644 index 80b58bd9..00000000 --- a/vendor/github.com/docker/docker/utils/utils_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows - -package utils - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/longpath" -) - -func getContextRoot(srcPath string) (string, error) { - cr, err := filepath.Abs(srcPath) - if err != nil { - return "", err - } - return longpath.AddPrefix(cr), nil -} diff --git 
a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go new file mode 100644 index 00000000..e7ca3d50 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -0,0 +1,106 @@ +package volumedrivers + +import ( + "fmt" + + "github.com/docker/docker/volume" +) + +type volumeDriverAdapter struct { + name string + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := a.proxy.Create(name, opts); err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name, + }, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { + ls, err := a.proxy.List() + if err != nil { + return nil, err + } + + var out []volume.Volume + for _, vp := range ls { + out = append(out, &volumeAdapter{ + proxy: a.proxy, + name: vp.Name, + driverName: a.name, + eMount: vp.Mountpoint, + }) + } + return out, nil +} + +func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { + v, err := a.proxy.Get(name) + if err != nil { + return nil, err + } + + // plugin may have returned no volume and no error + if v == nil { + return nil, fmt.Errorf("no such volume") + } + + return &volumeAdapter{ + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + }, nil +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + driverName string + eMount string // ephemeral host volume path +} + +type proxyVolume struct { + Name string + Mountpoint string +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) > 0 { + return a.eMount + } + m, _ := a.proxy.Path(a.name) + return m +} + +func (a *volumeAdapter) Mount() (string, error) { + var err error + a.eMount, err = a.proxy.Mount(a.name) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount() error { + return a.proxy.Unmount(a.name) +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go new file mode 100644 index 00000000..a55da5af --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint.go @@ -0,0 +1,164 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver), driverLock: &locker.Locker{}} + +const extName = "VolumeDriver" + +// NewVolumeDriver returns a driver that has the given name mapped on the given client. +func NewVolumeDriver(name string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name: name, proxy: proxy} +} + +type opts map[string]string +type list []*proxyVolume + +// volumeDriver defines the available functions that volume plugins must implement. +// This interface is only defined to generate the proxy objects.
+// It's not intended to be public or reused. +type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts opts) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name string) (err error) + // List lists all the volumes known to the driver + List() (volumes list, err error) + // Get retrieves the volume with the requested name + Get(name string) (volume *proxyVolume, err error) +} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex + driverLock *locker.Locker +} + +// Register associates the given driver with the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + if name == "" { + return false + } + + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if exists { + return false + } + drivers.extensions[name] = extension + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// Lookup returns the driver associated with the given name. If a +// driver with the given name has not been registered it checks if +// there is a VolumeDriver plugin available with the given name. +func Lookup(name string) (volume.Driver, error) { + drivers.driverLock.Lock(name) + defer drivers.driverLock.Unlock(name) + + drivers.Lock() + ext, ok := drivers.extensions[name] + drivers.Unlock() + if ok { + return ext, nil + } + + pl, err := plugins.Get(name, extName) + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + drivers.Lock() + defer drivers.Unlock() + if ext, ok := drivers.extensions[name]; ok { + return ext, nil + } + + d := NewVolumeDriver(name, pl.Client) + drivers.extensions[name] = d + return d, nil +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return Lookup(name) +} + +// GetDriverList returns the list of registered volume drivers. +// If no driver is registered, an empty list is returned.
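+// A minimal sketch of the registry round trip (illustrative; myDriver is a hypothetical volume.Driver implementation): +// +// Register(myDriver, "mydriver") +// d, err := Lookup("mydriver") +// names := GetDriverList() // contains "mydriver"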
+func GetDriverList() []string { + var driverList []string + drivers.Lock() + for driverName := range drivers.extensions { + driverList = append(driverList, driverName) + } + drivers.Unlock() + return driverList +} + +// GetAllDrivers lists all the registered drivers +func GetAllDrivers() ([]volume.Driver, error) { + plugins, err := plugins.GetAll(extName) + if err != nil { + return nil, err + } + var ds []volume.Driver + + drivers.Lock() + defer drivers.Unlock() + + for _, d := range drivers.extensions { + ds = append(ds, d) + } + + for _, p := range plugins { + ext, ok := drivers.extensions[p.Name] + if ok { + continue + } + + ext = NewVolumeDriver(p.Name, p.Client) + drivers.extensions[p.Name] = ext + ds = append(ds, ext) + } + return ds, nil +} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go new file mode 100644 index 00000000..5c7cdcb7 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/proxy.go @@ -0,0 +1,207 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import "errors" + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string + Opts opts +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string, opts opts) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + req.Opts = opts + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Unmount", 
req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyListRequest struct { +} + +type volumeDriverProxyListResponse struct { + Volumes list + Err string +} + +func (pp *volumeDriverProxy) List() (volumes list, err error) { + var ( + req volumeDriverProxyListRequest + ret volumeDriverProxyListResponse + ) + + if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + return + } + + volumes = ret.Volumes + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyGetRequest struct { + Name string +} + +type volumeDriverProxyGetResponse struct { + Volume *proxyVolume + Err string +} + +func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { + var ( + req volumeDriverProxyGetRequest + ret volumeDriverProxyGetResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + return + } + + volume = ret.Volume + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go new file mode 100644 index 00000000..0bca731a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local.go @@ -0,0 +1,330 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. +// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = fmt.Errorf("volume not found") + // volumeNameRegex ensures the name assigned for the volume is valid. + // This name is used to create the bind directory, so we need to avoid characters that + // would make the path to escape the root directory. + volumeNameRegex = utils.RestrictedVolumeNamePattern +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +type activeMount struct { + count uint64 + mounted bool +} + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist. 
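+// A minimal sketch (illustrative; the scope path and root IDs are hypothetical): +// +// r, err := New("/var/lib/docker", 0, 0) +// if err != nil { ... } +// v, err := r.Create("myvol", nil)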
+func New(scope string, rootUID, rootGID int) (*Root, error) { + rootDirectory := filepath.Join(scope, volumesPathName) + + if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + r := &Root{ + scope: scope, + path: rootDirectory, + volumes: make(map[string]*localVolume), + rootUID: rootUID, + rootGID: rootGID, + } + + dirs, err := ioutil.ReadDir(rootDirectory) + if err != nil { + return nil, err + } + + mountInfos, err := mount.GetMounts() + if err != nil { + logrus.Debugf("error looking up mounts for local volume cleanup: %v", err) + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + + name := filepath.Base(d.Name()) + v := &localVolume{ + driverName: r.Name(), + name: name, + path: r.DataPath(name), + } + r.volumes[name] = v + if b, err := ioutil.ReadFile(filepath.Join(rootDirectory, name, "opts.json")); err == nil { + if err := json.Unmarshal(b, &v.opts); err != nil { + return nil, err + } + + // unmount anything that may still be mounted (for example, from an unclean shutdown) + for _, info := range mountInfos { + if info.Mountpoint == v.path { + mount.Unmount(v.path) + break + } + } + } + } + + return r, nil +} + +// Root implements the Driver interface for the volume package and +// manages the creation/removal of volumes. It uses only standard vfs +// commands to create/remove dirs within its provided scope. +type Root struct { + m sync.Mutex + scope string + path string + volumes map[string]*localVolume + rootUID int + rootGID int +} + +// List lists all the volumes +func (r *Root) List() ([]volume.Volume, error) { + var ls []volume.Volume + r.m.Lock() + for _, v := range r.volumes { + ls = append(ls, v) + } + r.m.Unlock() + return ls, nil +} + +// DataPath returns the constructed path of this volume. +func (r *Root) DataPath(volumeName string) string { + return filepath.Join(r.path, volumeName, VolumeDataPathName) +} + +// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. +func (r *Root) Name() string { + return volume.DefaultDriverName +} + +// Create creates a new volume.Volume with the provided name, creating +// the underlying directory tree required for this volume in the +// process. +func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := r.validateName(name); err != nil { + return nil, err + } + + r.m.Lock() + defer r.m.Unlock() + + v, exists := r.volumes[name] + if exists { + return v, nil + } + + path := r.DataPath(name) + if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, err + } + + var err error + defer func() { + if err != nil { + os.RemoveAll(filepath.Dir(path)) + } + }() + + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + + if opts != nil { + if err = setOpts(v, opts); err != nil { + return nil, err + } + var b []byte + b, err = json.Marshal(v.opts) + if err != nil { + return nil, err + } + if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 0600); err != nil { + return nil, err + } + } + + r.volumes[name] = v + return v, nil +} + +// Remove removes the specified volume and all underlying data. If the +// given volume does not belong to this driver, an error is returned. +// The volume is reference counted; if all references are +// not released then the volume is not removed.
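+// A minimal sketch (illustrative only): +// +// v, err := r.Get("myvol") +// if err == nil { +// err = r.Remove(v) +// }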
+func (r *Root) Remove(v volume.Volume) error { + r.m.Lock() + defer r.m.Unlock() + + lv, ok := v.(*localVolume) + if !ok { + return fmt.Errorf("unknown volume type %T", v) + } + + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + realPath = filepath.Dir(lv.path) + } + + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory out of the Docker root %s: %s", r.scope, realPath) + } + + if err := removePath(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return removePath(filepath.Dir(lv.path)) +} + +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + return nil +} + +// Get looks up the volume for the given name and returns it if found +func (r *Root) Get(name string) (volume.Volume, error) { + r.m.Lock() + v, exists := r.volumes[name] + r.m.Unlock() + if !exists { + return nil, ErrNotFound + } + return v, nil +} + +func (r *Root) validateName(name string) error { + if !volumeNameRegex.MatchString(name) { + return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed", name, utils.RestrictedNameChars)} + } + return nil +} + +// localVolume implements the Volume interface from the volume package and +// represents the volumes created by Root. +type localVolume struct { + m sync.Mutex + usedCount int + // unique name of the volume + name string + // path is the path on the host where the data lives + path string + // driverName is the name of the driver that created the volume. + driverName string + // opts is the parsed list of options used to create the volume + opts *optsConfig + // active refcounts the active mounts + active activeMount +} + +// Name returns the name of the given Volume. +func (v *localVolume) Name() string { + return v.name +} + +// DriverName returns the driver that created the given Volume. +func (v *localVolume) DriverName() string { + return v.driverName +} + +// Path returns the data location. +func (v *localVolume) Path() string { + return v.path +} + +// Mount implements the Volume interface, mounting the backing device on first use +// (when mount options were given) and returning the data location. +func (v *localVolume) Mount() (string, error) { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + if !v.active.mounted { + if err := v.mount(); err != nil { + return "", err + } + v.active.mounted = true + } + v.active.count++ + } + return v.path, nil +} + +// Unmount implements the Volume interface, unmounting the volume from the host +// when its last active mount is released; volumes created without mount options +// are never actually mounted, so for them this is a no-op. +func (v *localVolume) Unmount() error { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + v.active.count-- + if v.active.count == 0 { + if err := mount.Unmount(v.path); err != nil { + v.active.count++ + return err + } + v.active.mounted = false + } + } + return nil +} + +func validateOpts(opts map[string]string) error { + for opt := range opts { + if !validOpts[opt] { + return validationError{fmt.Errorf("invalid option key: %q", opt)} + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go new file mode 100644 index 00000000..2e63777a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server.
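+// As a sketch (illustrative; the NFS address and export are hypothetical), a +// volume backed by an explicit mount can be created through the "type", "o" +// and "device" options validated in this file: +// +// v, err := r.Create("nfsvol", map[string]string{ +// "type": "nfs", +// "o": "addr=10.0.0.1", +// "device": ":/export", +// })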
+package local + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/mount" +) + +var ( + oldVfsDir = filepath.Join("vfs", "dir") + + validOpts = map[string]bool{ + "type": true, // specify the filesystem type for mount, e.g. nfs + "o": true, // generic mount options + "device": true, // device to mount from + } +) + +type optsConfig struct { + MountType string + MountOpts string + MountDevice string +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) == 0 { + return nil + } + if err := validateOpts(opts); err != nil { + return err + } + + v.opts = &optsConfig{ + MountType: opts["type"], + MountOpts: opts["o"], + MountDevice: opts["device"], + } + return nil +} + +func (v *localVolume) mount() error { + if v.opts.MountDevice == "" { + return fmt.Errorf("missing device in volume options") + } + return mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, v.opts.MountOpts) +} diff --git a/vendor/github.com/docker/docker/volume/store/errors.go b/vendor/github.com/docker/docker/volume/store/errors.go new file mode 100644 index 00000000..7bdfa12b --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/errors.go @@ -0,0 +1,74 @@ +package store + +import ( + "errors" + "strings" +) + +var ( + // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + errVolumeInUse = errors.New("volume is in use") + // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store + errNoSuchVolume = errors.New("no such volume") + // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + errInvalidName = errors.New("volume name is not valid on this platform") + // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver + errNameConflict = errors.New("conflict: volume name must be unique") +) + +// OpErr is the error type returned by functions in the store package. It describes +// the operation, volume name, and error. +type OpErr struct { + // Err is the error that occurred during the operation. + Err error + // Op is the operation which caused the error, such as "create", or "list". + Op string + // Name is the name of the resource being requested for this op, typically the volume name or the driver name. + Name string + // Refs is the list of references associated with the resource. + Refs []string +} + +// Error satisfies the built-in error interface type. 
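+// For example (sketch), an in-use removal failure renders as: +// +// remove myvol: volume is in use - [cid1, cid2]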
+func (e *OpErr) Error() string { + if e == nil { + return "" + } + s := e.Op + if e.Name != "" { + s = s + " " + e.Name + } + + s = s + ": " + e.Err.Error() + if len(e.Refs) > 0 { + s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]" + } + return s +} + +// IsInUse returns a boolean indicating whether the error indicates that a +// volume is in use +func IsInUse(err error) bool { + return isErr(err, errVolumeInUse) +} + +// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist +func IsNotExist(err error) bool { + return isErr(err, errNoSuchVolume) +} + +// IsNameConflict returns a boolean indicating whether the error indicates that a +// volume name is already taken +func IsNameConflict(err error) bool { + return isErr(err, errNameConflict) +} + +func isErr(err error, expected error) bool { + switch pe := err.(type) { + case nil: + return false + case *OpErr: + err = pe.Err + } + return err == expected +} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/store/store.go new file mode 100644 index 00000000..b2ac9a8c --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store.go @@ -0,0 +1,506 @@ +package store + +import ( + "bytes" + "encoding/json" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +const ( + volumeDataDir = "volumes" + volumeBucketName = "volumes" +) + +type volumeMetadata struct { + Name string + Labels map[string]string +} + +type volumeWithLabels struct { + volume.Volume + labels map[string]string +} + +func (v volumeWithLabels) Labels() map[string]string { + return v.labels +} + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. 
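+// A minimal sketch (illustrative; the root path is hypothetical): +// +// s, err := New("/var/lib/docker") +// if err != nil { ... } +// v, err := s.Create("myvol", "local", nil, nil)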
+func New(rootPath string) (*VolumeStore, error) { + vs := &VolumeStore{ + locks: &locker.Locker{}, + names: make(map[string]volume.Volume), + refs: make(map[string][]string), + labels: make(map[string]map[string]string), + } + + if rootPath != "" { + // initialize metadata store + volPath := filepath.Join(rootPath, volumeDataDir) + if err := os.MkdirAll(volPath, 0750); err != nil { + return nil, err + } + + dbPath := filepath.Join(volPath, "metadata.db") + + var err error + vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + + // initialize volumes bucket + if err := vs.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists([]byte(volumeBucketName)); err != nil { + return err + } + + return nil + }); err != nil { + return nil, err + } + } + + return vs, nil +} + +func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { + s.globalLock.Lock() + v, exists := s.names[name] + s.globalLock.Unlock() + return v, exists +} + +func (s *VolumeStore) setNamed(v volume.Volume, ref string) { + s.globalLock.Lock() + s.names[v.Name()] = v + if len(ref) > 0 { + s.refs[v.Name()] = append(s.refs[v.Name()], ref) + } + s.globalLock.Unlock() +} + +func (s *VolumeStore) purge(name string) { + s.globalLock.Lock() + delete(s.names, name) + delete(s.refs, name) + delete(s.labels, name) + s.globalLock.Unlock() +} + +// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts +type VolumeStore struct { + locks *locker.Locker + globalLock sync.Mutex + // names stores the volume name -> driver name relationship. + // This is used for making lookups faster so we don't have to probe all drivers + names map[string]volume.Volume + // refs stores the volume name and the list of things referencing it + refs map[string][]string + // labels stores volume labels for each volume + labels map[string]map[string]string + db *bolt.DB +} + +// List proxies to all registered volume drivers to get the full list of volumes +// If a driver returns a volume that has a name which conflicts with another volume from a different driver, +// the first volume is chosen and the conflicting volume is dropped. +func (s *VolumeStore) List() ([]volume.Volume, []string, error) { + vols, warnings, err := s.list() + if err != nil { + return nil, nil, &OpErr{Err: err, Op: "list"} + } + var out []volume.Volume + + for _, v := range vols { + name := normaliseVolumeName(v.Name()) + + s.locks.Lock(name) + storedV, exists := s.getNamed(name) + // Note: it's not safe to populate the cache here because the volume may have been + // deleted before we acquire a lock on its name + if exists && storedV.DriverName() != v.DriverName() { + logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) + s.locks.Unlock(name) + continue + } + + out = append(out, v) + s.locks.Unlock(name) + } + return out, warnings, nil +} + +// list goes through each volume driver and asks for its list of volumes.
+func (s *VolumeStore) list() ([]volume.Volume, []string, error) { + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, nil, err + } + var ( + ls []volume.Volume + warnings []string + ) + + type vols struct { + vols []volume.Volume + err error + driverName string + } + chVols := make(chan vols, len(drivers)) + + for _, vd := range drivers { + go func(d volume.Driver) { + vs, err := d.List() + if err != nil { + chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} + return + } + chVols <- vols{vols: vs} + }(vd) + } + + badDrivers := make(map[string]struct{}) + for i := 0; i < len(drivers); i++ { + vs := <-chVols + + if vs.err != nil { + warnings = append(warnings, vs.err.Error()) + badDrivers[vs.driverName] = struct{}{} + logrus.Warn(vs.err) + } + ls = append(ls, vs.vols...) + } + + if len(badDrivers) > 0 { + for _, v := range s.names { + if _, exists := badDrivers[v.DriverName()]; exists { + ls = append(ls, v) + } + } + } + return ls, warnings, nil +} + +// CreateWithRef creates a volume with the given name and driver and stores the ref +// This is just like Create() except we store the reference while holding the lock. +// This ensures there's no race between creating a volume and then storing a reference. +func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + + s.setNamed(v, ref) + return v, nil +} + +// Create creates a volume with the given name and driver. +func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + s.setNamed(v, "") + return v, nil +} + +// create asks the given driver to create a volume with the name/opts. +// If a volume with the name is already known, it will ask the stored driver for the volume. +// If the passed in driver name does not match the driver name which is stored for the given volume name, an error is returned. +// It is expected that callers of this function hold any necessary locks. 
+func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} + } + + if v, exists := s.getNamed(name); exists { + if v.DriverName() != driverName && driverName != "" && driverName != volume.DefaultDriverName { + return nil, errNameConflict + } + return v, nil + } + + // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name + if driverName == "" { + v, _ := s.getVolume(name) + if v != nil { + return v, nil + } + } + + vd, err := volumedrivers.GetDriver(driverName) + + if err != nil { + return nil, &OpErr{Op: "create", Name: name, Err: err} + } + + logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + + if v, _ := vd.Get(name); v != nil { + return v, nil + } + v, err := vd.Create(name, opts) + if err != nil { + return nil, err + } + s.globalLock.Lock() + s.labels[name] = labels + s.globalLock.Unlock() + + if s.db != nil { + metadata := &volumeMetadata{ + Name: name, + Labels: labels, + } + + volData, err := json.Marshal(metadata) + if err != nil { + return nil, err + } + + if err := s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(volumeBucketName)) + err := b.Put([]byte(name), volData) + return err + }); err != nil { + return nil, err + } + } + + return volumeWithLabels{v, labels}, nil +} + +// GetWithRef gets a volume with the given name from the passed in driver and stores the ref +// This is just like Get(), but we store the reference while holding the lock. +// This makes sure there are no races between checking for the existence of a volume and adding a reference for it +func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + v, err := vd.Get(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + s.setNamed(v, ref) + if labels, ok := s.labels[name]; ok { + return volumeWithLabels{v, labels}, nil + } + return v, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.getVolume(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + s.setNamed(v, "") + return v, nil +} + +// getVolume requests the volume, if the driver info is stored it just accesses that driver, +// if the driver is unknown it probes all drivers until it finds the first volume with that name. 
+// it is expected that callers of this function hold any necessary locks +func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { + labels := map[string]string{} + + if s.db != nil { + // get meta + if err := s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(volumeBucketName)) + data := b.Get([]byte(name)) + + if string(data) == "" { + return nil + } + + var meta volumeMetadata + buf := bytes.NewBuffer(data) + + if err := json.NewDecoder(buf).Decode(&meta); err != nil { + return err + } + labels = meta.Labels + + return nil + }); err != nil { + return nil, err + } + } + + logrus.Debugf("Getting volume reference for name: %s", name) + s.globalLock.Lock() + v, exists := s.names[name] + s.globalLock.Unlock() + if exists { + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return nil, err + } + vol, err := vd.Get(name) + if err != nil { + return nil, err + } + return volumeWithLabels{vol, labels}, nil + } + + logrus.Debugf("Probing all drivers for volume with name: %s", name) + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, err + } + + for _, d := range drivers { + v, err := d.Get(name) + if err != nil { + continue + } + + return volumeWithLabels{v, labels}, nil + } + return nil, errNoSuchVolume +} + +// Remove removes the requested volume. A volume is not removed if it has any refs +func (s *VolumeStore) Remove(v volume.Volume) error { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + if refs, exists := s.refs[name]; exists && len(refs) > 0 { + return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs} + } + + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return &OpErr{Err: err, Name: vd.Name(), Op: "remove"} + } + + logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + vol := withoutLabels(v) + if err := vd.Remove(vol); err != nil { + return &OpErr{Err: err, Name: name, Op: "remove"} + } + + s.purge(name) + return nil +} + +// Dereference removes the specified reference to the volume +func (s *VolumeStore) Dereference(v volume.Volume, ref string) { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + var refs []string + + for _, r := range s.refs[v.Name()] { + if r != ref { + refs = append(refs, r) + } + } + s.refs[v.Name()] = refs +} + +// Refs gets the current list of refs for the given volume +func (s *VolumeStore) Refs(v volume.Volume) []string { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + refs, exists := s.refs[v.Name()] + if !exists { + return nil + } + + refsOut := make([]string, len(refs)) + copy(refsOut, refs) + return refsOut +} + +// FilterByDriver returns the available volumes filtered by driver name +func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) { + vd, err := volumedrivers.GetDriver(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + ls, err := vd.List() + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + return ls, nil +} + +// FilterByUsed returns the available volumes filtered by if they are in use or not. +// `used=true` returns only volumes that are being used, while `used=false` returns +// only volumes that are not being used. 
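+// For example (sketch): +// +// vols, _, err := s.List() +// inUse := s.FilterByUsed(vols, true) +// free := s.FilterByUsed(vols, false)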
+func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume { + return s.filter(vols, func(v volume.Volume) bool { + s.locks.Lock(v.Name()) + l := len(s.refs[v.Name()]) + s.locks.Unlock(v.Name()) + if (used && l > 0) || (!used && l == 0) { + return true + } + return false + }) +} + +// filterFunc defines a function to allow filter volumes in the store +type filterFunc func(vol volume.Volume) bool + +// filter returns the available volumes filtered by a filterFunc function +func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume { + var ls []volume.Volume + for _, v := range vols { + if f(v) { + ls = append(ls, v) + } + } + return ls +} + +func withoutLabels(v volume.Volume) volume.Volume { + if vol, ok := v.(volumeWithLabels); ok { + return vol.Volume + } + + return v +} diff --git a/vendor/github.com/docker/docker/volume/store/store_unix.go b/vendor/github.com/docker/docker/volume/store/store_unix.go new file mode 100644 index 00000000..319c541d --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd + +package store + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. This is a no-op on Unix-like platforms +func normaliseVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go index 98f90f06..077cddfb 100644 --- a/vendor/github.com/docker/docker/volume/volume.go +++ b/vendor/github.com/docker/docker/volume/volume.go @@ -1,13 +1,10 @@ package volume import ( + "fmt" "os" "runtime" "strings" - - "github.com/Sirupsen/logrus" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/system" ) // DefaultDriverName is the driver name used for the driver @@ -21,7 +18,11 @@ type Driver interface { // Create makes a new volume with the given id. Create(name string, opts map[string]string) (Volume, error) // Remove deletes the volume. - Remove(Volume) error + Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) } // Volume is a place to store data. It is backed by a specific driver, and can be mounted. 
@@ -52,6 +53,15 @@ type MountPoint struct { // Note Mode is not used on Windows Mode string `json:"Relabel"` // Originally field was `Relabel`" + + // Note Propagation is not used on Windows + Propagation string // Mount propagation string + Named bool // specifies if the mountpoint was specified by name + + // Specifies if data should be copied from the container before the first mount + // Use a pointer here so we can tell if the user set this value explicitly + // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated + CopyData bool `json:"-"` } // Setup sets up a mount point by either mounting the volume if it is @@ -66,15 +76,14 @@ func (m *MountPoint) Setup() (string, error) { return "", err } if runtime.GOOS != "windows" { // Windows does not have deprecation issues here - logrus.Warnf("Auto-creating non-existant volume host path %s, this is deprecated and will be removed soon", m.Source) - if err := system.MkdirAll(m.Source, 0755); err != nil { + if err := os.MkdirAll(m.Source, 0755); err != nil { return "", err } } } return m.Source, nil } - return "", derr.ErrorCodeMountSetup + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") } // Path returns the path of a volume in a mount point. @@ -85,21 +94,10 @@ func (m *MountPoint) Path() string { return m.Source } -// ValidMountMode will make sure the mount mode is valid. -// returns if it's a valid mount mode or not. -func ValidMountMode(mode string) bool { - return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] -} - -// ReadWrite tells you if a mode string is a valid read-write mode or not. -func ReadWrite(mode string) bool { - return rwModes[strings.ToLower(mode)] -} - -// ParseVolumesFrom ensure that the supplied volumes-from is valid. +// ParseVolumesFrom ensures that the supplied volumes-from is valid. func ParseVolumesFrom(spec string) (string, string, error) { if len(spec) == 0 { - return "", "", derr.ErrorCodeVolumeFromBlank.WithArgs(spec) + return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec) } specParts := strings.SplitN(spec, ":", 2) @@ -109,64 +107,27 @@ func ParseVolumesFrom(spec string) (string, string, error) { if len(specParts) == 2 { mode = specParts[1] if !ValidMountMode(mode) { - return "", "", derr.ErrorCodeVolumeInvalidMode.WithArgs(mode) + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if HasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode); isSet { + return "", "", errInvalidMode(mode) } } return id, mode, nil } -// SplitN splits raw into a maximum of n parts, separated by a separator colon. -// A separator colon is the last `:` character in the regex `[/:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). -// This allows to correctly split strings such as `C:\foo:D:\:rw`. -func SplitN(raw string, n int) []string { - var array []string - if len(raw) == 0 || raw[0] == ':' { - // invalid - return nil - } - // numberOfParts counts the number of parts separated by a separator colon - numberOfParts := 0 - // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. 
- left := 0 - // right represents the right-most cursor in raw incremented with the loop. Note this - // starts at index 1 as index 0 is already handle above as a special case. - for right := 1; right < len(raw); right++ { - // stop parsing if reached maximum number of parts - if n >= 0 && numberOfParts >= n { - break - } - if raw[right] != ':' { - continue - } - potentialDriveLetter := raw[right-1] - if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { - if right > 1 { - beforePotentialDriveLetter := raw[right-2] - if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '/' && beforePotentialDriveLetter != '\\' { - // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. - } - // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. - } else { - // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - } - // need to take care of the last part - if left < len(raw) { - if n >= 0 && numberOfParts >= n { - // if the maximum number of parts is reached, just append the rest to the last part - // left-1 is at the last `:` that needs to be included since not considered a separator. - array[n-1] += raw[left-1:] - } else { - array = append(array, raw[left:]) - } - } - return array +func errInvalidMode(mode string) error { + return fmt.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return fmt.Errorf("Invalid volume specification: '%s'", spec) } diff --git a/vendor/github.com/docker/docker/volume/volume_copy.go b/vendor/github.com/docker/docker/volume/volume_copy.go new file mode 100644 index 00000000..067537fb --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy.go @@ -0,0 +1,28 @@ +package volume + +import "strings" + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = true +) + +// {=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// GetCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return DefaultCopyMode, false +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go new file mode 100644 index 00000000..f5f28205 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go @@ -0,0 +1,44 @@ +// +build linux + +package volume + +import ( + "strings" +) + +// DefaultPropagationMode defines what propagation mode should be used by +// default if user has not specified one explicitly. 
+const DefaultPropagationMode string = "rprivate" + +// propagation modes +var propagationModes = map[string]bool{ + "private": true, + "rprivate": true, + "slave": true, + "rslave": true, + "shared": true, + "rshared": true, +} + +// GetPropagation extracts and returns the mount propagation mode. If there +// are no specifications, then by default it is "private". +func GetPropagation(mode string) string { + for _, o := range strings.Split(mode, ",") { + if propagationModes[o] { + return o + } + } + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if propagationModes[o] { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go new file mode 100644 index 00000000..0edc89ab --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go @@ -0,0 +1,22 @@ +// +build !linux + +package volume + +// DefaultPropagationMode is used only in linux. In other cases it returns +// empty string. +const DefaultPropagationMode string = "" + +// propagation modes not supported on this platform. +var propagationModes = map[string]bool{} + +// GetPropagation is not supported. Return empty string. +func GetPropagation(mode string) string { + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. 
+func HasPropagation(mode string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_test.go b/vendor/github.com/docker/docker/volume/volume_test.go deleted file mode 100644 index 2ee62e63..00000000 --- a/vendor/github.com/docker/docker/volume/volume_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package volume - -import ( - "runtime" - "strings" - "testing" -) - -func TestParseMountSpec(t *testing.T) { - var ( - valid []string - invalid map[string]string - ) - - if runtime.GOOS == "windows" { - valid = []string{ - `d:\`, - `d:`, - `d:\path`, - `d:\path with space`, - // TODO Windows post TP4 - readonly support `d:\pathandmode:ro`, - `c:\:d:\`, - `c:\windows\:d:`, - `c:\windows:d:\s p a c e`, - `c:\windows:d:\s p a c e:RW`, - `c:\program files:d:\s p a c e i n h o s t d i r`, - `0123456789name:d:`, - `MiXeDcAsEnAmE:d:`, - `name:D:`, - `name:D::rW`, - `name:D::RW`, - // TODO Windows post TP4 - readonly support `name:D::RO`, - `c:/:d:/forward/slashes/are/good/too`, - // TODO Windows post TP4 - readonly support `c:/:d:/including with/spaces:ro`, - `c:\Windows`, // With capital - `c:\Program Files (x86)`, // With capitals and brackets - } - invalid = map[string]string{ - ``: "Invalid volume specification: ", - `.`: "Invalid volume specification: ", - `..\`: "Invalid volume specification: ", - `c:\:..\`: "Invalid volume specification: ", - `c:\:d:\:xyzzy`: "Invalid volume specification: ", - `c:`: "cannot be c:", - `c:\`: `cannot be c:\`, - `c:\notexist:d:`: `The system cannot find the file specified`, - `c:\windows\system32\ntdll.dll:d:`: `Source 'c:\windows\system32\ntdll.dll' is not a directory`, - `name<:d:`: `Invalid volume specification`, - `name>:d:`: `Invalid volume specification`, - `name::d:`: `Invalid volume specification`, - `name":d:`: `Invalid volume specification`, - `name\:d:`: `Invalid volume specification`, - `name*:d:`: `Invalid volume specification`, - `name|:d:`: `Invalid volume specification`, - `name?:d:`: `Invalid volume specification`, - `name/:d:`: `Invalid volume specification`, - `d:\pathandmode:rw`: `Invalid volume specification`, - `con:d:`: `cannot be a reserved word for Windows filenames`, - `PRN:d:`: `cannot be a reserved word for Windows filenames`, - `aUx:d:`: `cannot be a reserved word for Windows filenames`, - `nul:d:`: `cannot be a reserved word for Windows filenames`, - `com1:d:`: `cannot be a reserved word for Windows filenames`, - `com2:d:`: `cannot be a reserved word for Windows filenames`, - `com3:d:`: `cannot be a reserved word for Windows filenames`, - `com4:d:`: `cannot be a reserved word for Windows filenames`, - `com5:d:`: `cannot be a reserved word for Windows filenames`, - `com6:d:`: `cannot be a reserved word for Windows filenames`, - `com7:d:`: `cannot be a reserved word for Windows filenames`, - `com8:d:`: `cannot be a reserved word for Windows filenames`, - `com9:d:`: `cannot be a reserved word for Windows filenames`, - `lpt1:d:`: `cannot be a reserved word for Windows filenames`, - `lpt2:d:`: `cannot be a reserved word for Windows filenames`, - `lpt3:d:`: `cannot be a reserved word for Windows filenames`, - `lpt4:d:`: `cannot be a reserved word for Windows filenames`, - `lpt5:d:`: `cannot be a reserved word for Windows filenames`, - `lpt6:d:`: `cannot be a reserved word for Windows filenames`, - `lpt7:d:`: `cannot be a reserved word for Windows filenames`, - `lpt8:d:`: `cannot be a reserved word for Windows filenames`, - `lpt9:d:`: `cannot be a reserved word for Windows filenames`, - } - - } else { - valid = 
[]string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:ro", - "/hostPath:/containerPath:rw", - "/rw:/ro", - } - invalid = map[string]string{ - "": "Invalid volume specification", - "./": "Invalid volume destination", - "../": "Invalid volume destination", - "/:../": "Invalid volume destination", - "/:path": "Invalid volume destination", - ":": "Invalid volume specification", - "/tmp:": "Invalid volume destination", - ":test": "Invalid volume specification", - ":/test": "Invalid volume specification", - "tmp:": "Invalid volume destination", - ":test:": "Invalid volume specification", - "::": "Invalid volume specification", - ":::": "Invalid volume specification", - "/tmp:::": "Invalid volume specification", - ":/tmp::": "Invalid volume specification", - "/path:rw": "Invalid volume specification", - "/path:ro": "Invalid volume specification", - "/rw:rw": "Invalid volume specification", - "path:ro": "Invalid volume specification", - "/path:/path:sw": `invalid mode: "sw"`, - "/path:/path:rwz": `invalid mode: "rwz"`, - } - } - - for _, path := range valid { - if _, err := ParseMountSpec(path, "local"); err != nil { - t.Fatalf("ParseMountSpec(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ParseMountSpec(path, "local"); err == nil { - t.Fatalf("ParseMountSpec(`%q`) should have failed validation. Err %v", path, err) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ParseMountSpec(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) - } - } - } -} - -func TestSplitN(t *testing.T) { - for _, x := range []struct { - input string - n int - expected []string - }{ - {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, - {`:C:\foo:d:`, -1, nil}, - {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, - {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, - {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, - - {`d:\`, -1, []string{`d:\`}}, - {`d:`, -1, []string{`d:`}}, - {`d:\path`, -1, []string{`d:\path`}}, - {`d:\path with space`, -1, []string{`d:\path with space`}}, - {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, - {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, - {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, - {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, - {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, - {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, - {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, - {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, - {`name:D:`, -1, []string{`name`, `D:`}}, - {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, - {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, - {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, - {`c:\Windows`, -1, []string{`c:\Windows`}}, - {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, - - {``, -1, nil}, - {`.`, -1, []string{`.`}}, - {`..\`, -1, []string{`..\`}}, - {`c:\:..\`, -1, []string{`c:\`, `..\`}}, - {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, - } { - res := SplitN(x.input, x.n) - if len(res) < len(x.expected) { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - for i, e := range res { - if e != x.expected[i] { - t.Fatalf("input: %v, expected: %v, got: %v", 
x.input, x.expected, res) - } - } - } -} - -// testParseMountSpec is a structure used by TestParseMountSpecSplit for -// specifying test cases for the ParseMountSpec() function. -type testParseMountSpec struct { - bind string - driver string - expDest string - expSource string - expName string - expDriver string - expRW bool - fail bool -} - -func TestParseMountSpecSplit(t *testing.T) { - var cases []testParseMountSpec - if runtime.GOOS == "windows" { - cases = []testParseMountSpec{ - {`c:\:d:`, "local", `d:`, `c:\`, ``, "", true, false}, - {`c:\:d:\`, "local", `d:\`, `c:\`, ``, "", true, false}, - // TODO Windows post TP4 - Add readonly support {`c:\:d:\:ro`, "local", `d:\`, `c:\`, ``, "", false, false}, - {`c:\:d:\:rw`, "local", `d:\`, `c:\`, ``, "", true, false}, - {`c:\:d:\:foo`, "local", `d:\`, `c:\`, ``, "", false, true}, - {`name:d::rw`, "local", `d:`, ``, `name`, "local", true, false}, - {`name:d:`, "local", `d:`, ``, `name`, "local", true, false}, - // TODO Windows post TP4 - Add readonly support {`name:d::ro`, "local", `d:`, ``, `name`, "local", false, false}, - {`name:c:`, "", ``, ``, ``, "", true, true}, - {`driver/name:c:`, "", ``, ``, ``, "", true, true}, - } - } else { - cases = []testParseMountSpec{ - {"/tmp:/tmp1", "", "/tmp1", "/tmp", "", "", true, false}, - {"/tmp:/tmp2:ro", "", "/tmp2", "/tmp", "", "", false, false}, - {"/tmp:/tmp3:rw", "", "/tmp3", "/tmp", "", "", true, false}, - {"/tmp:/tmp4:foo", "", "", "", "", "", false, true}, - {"name:/named1", "", "/named1", "", "name", "local", true, false}, - {"name:/named2", "external", "/named2", "", "name", "external", true, false}, - {"name:/named3:ro", "local", "/named3", "", "name", "local", false, false}, - {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "local", true, false}, - {"/tmp:tmp", "", "", "", "", "", true, true}, - } - } - - for _, c := range cases { - m, err := ParseMountSpec(c.bind, c.driver) - if c.fail { - if err == nil { - t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) - } - continue - } - - if m == nil || err != nil { - t.Fatalf("ParseMountSpec failed for spec %s driver %s error %v\n", c.bind, c.driver, err.Error()) - continue - } - - if m.Destination != c.expDest { - t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind) - } - - if m.Source != c.expSource { - t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind) - } - - if m.Name != c.expName { - t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind) - } - - if m.Driver != c.expDriver { - t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind) - } - - if m.RW != c.expRW { - t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind) - } - } -} diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go index 8c98a3d9..2520d7c1 100644 --- a/vendor/github.com/docker/docker/volume/volume_unix.go +++ b/vendor/github.com/docker/docker/volume/volume_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd darwin +// +build linux freebsd darwin solaris package volume @@ -6,28 +6,18 @@ import ( "fmt" "path/filepath" "strings" - - derr "github.com/docker/docker/errors" ) // read-write modes var rwModes = map[string]bool{ - "rw": true, - "rw,Z": true, - "rw,z": true, - "z,rw": true, - "Z,rw": true, - "Z": true, - "z": true, + "rw": true, + "ro": true, } -// read-only modes -var roModes = map[string]bool{ - "ro": true, - "ro,Z": true, - "ro,z": true, - "z,ro": 
true, - "Z,ro": true, +// label modes +var labelModes = map[string]bool{ + "Z": true, + "z": true, } // BackwardsCompatible decides whether this mount point can be @@ -51,15 +41,16 @@ func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) { spec = filepath.ToSlash(spec) mp := &MountPoint{ - RW: true, + RW: true, + Propagation: DefaultPropagationMode, } if strings.Count(spec, ":") > 2 { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + return nil, errInvalidSpec(spec) } arr := strings.SplitN(spec, ":", 3) if arr[0] == "" { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + return nil, errInvalidSpec(spec) } switch len(arr) { @@ -70,7 +61,7 @@ func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) { if isValid := ValidMountMode(arr[1]); isValid { // Destination + Mode is not a valid volume - volumes // cannot include a mode. eg /foo:rw - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + return nil, errInvalidSpec(spec) } // Host Source Path or Name + Destination mp.Source = arr[0] @@ -81,35 +72,51 @@ func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) { mp.Destination = arr[1] mp.Mode = arr[2] // Mode field is used by SELinux to decide whether to apply label if !ValidMountMode(mp.Mode) { - return nil, derr.ErrorCodeVolumeInvalidMode.WithArgs(mp.Mode) + return nil, errInvalidMode(mp.Mode) } mp.RW = ReadWrite(mp.Mode) + mp.Propagation = GetPropagation(mp.Mode) default: - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + return nil, errInvalidSpec(spec) } //validate the volumes destination path mp.Destination = filepath.Clean(mp.Destination) if !filepath.IsAbs(mp.Destination) { - return nil, derr.ErrorCodeVolumeAbs.WithArgs(mp.Destination) + return nil, fmt.Errorf("Invalid volume destination path: '%s' mount path must be absolute.", mp.Destination) } // Destination cannot be "/" if mp.Destination == "/" { - return nil, derr.ErrorCodeVolumeSlash.WithArgs(spec) + return nil, fmt.Errorf("Invalid specification: destination can't be '/' in '%s'", spec) } name, source := ParseVolumeSource(mp.Source) if len(source) == 0 { mp.Source = "" // Clear it out as we previously assumed it was not a name mp.Driver = volumeDriver - if len(mp.Driver) == 0 { - mp.Driver = DefaultDriverName + // Named volumes can't have propagation properties specified. + // Their defaults will be decided by docker. This is just a + // safeguard. Don't want to get into situations where named + // volumes were mounted as '[r]shared' inside container and + // container does further mounts under that volume and these + // mounts become visible on host and later original volume + // cleanup becomes an issue if container does not unmount + // submounts explicitly. + if HasPropagation(mp.Mode) { + return nil, errInvalidSpec(spec) } } else { mp.Source = filepath.Clean(source) } + copyData, isSet := getCopyMode(mp.Mode) + // do not allow copy modes on binds + if len(name) == 0 && isSet { + return nil, errInvalidMode(mp.Mode) + } + + mp.CopyData = copyData mp.Name = name return mp, nil @@ -130,3 +137,50 @@ func ParseVolumeSource(spec string) (string, string) { func IsVolumeNameValid(name string) (bool, error) { return true, nil } + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. 
+func ValidMountMode(mode string) bool { + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case labelModes[o]: + labelModeCount++ + case propagationModes[o]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 { + return false + } + return true +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +// If there are no specifications w.r.t read write mode, then by default +// it returns true. +func ReadWrite(mode string) bool { + if !ValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + + return true +} diff --git a/vendor/github.com/docker/docker/volume/volume_windows.go b/vendor/github.com/docker/docker/volume/volume_windows.go deleted file mode 100644 index b6b3bbb0..00000000 --- a/vendor/github.com/docker/docker/volume/volume_windows.go +++ /dev/null @@ -1,181 +0,0 @@ -package volume - -import ( - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/Sirupsen/logrus" - derr "github.com/docker/docker/errors" -) - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, -} - -// read-only modes -var roModes = map[string]bool{ - "ro": true, -} - -const ( - // Spec should be in the format [source:]destination[:mode] - // - // Examples: c:\foo bar:d:rw - // c:\foo:d:\bar - // myname:d: - // d:\ - // - // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See - // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to - // test is https://regex-golang.appspot.com/assets/html/index.html - // - // Useful link for referencing named capturing groups: - // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex - // - // There are three match groups: source, destination and mode. - // - - // RXHostDir is the first option of a source - RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` - // RXName is the second option of a source - RXName = `[^\\/:*?"<>|\r\n]+` - // RXReservedNames are reserved names not possible on Windows - RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` - - // RXSource is the combined possiblities for a source - RXSource = `((?P((` + RXHostDir + `)|(` + RXName + `))):)?` - - // Source. Can be either a host directory, a name, or omitted: - // HostDir: - // - Essentially using the folder solution from - // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html - // but adding case insensitivity. - // - Must be an absolute path such as c:\path - // - Can include spaces such as `c:\program files` - // - And then followed by a colon which is not in the capture group - // - And can be optional - // Name: - // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) - // - And then followed by a colon which is not in the capture group - // - And can be optional - - // RXDestination is the regex expression for the mount destination - RXDestination = `(?P([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` - // Destination (aka container path): - // - Variation on hostdir but can be a drive followed by colon as well - // - If a path, must be absolute. 
Can include spaces - // - Drive cannot be c: (explicitly checked in code, not RegEx) - // - - // RXMode is the regex expression for the mode of the mount - RXMode = `(:(?P(?i)rw))?` - // Temporarily for TP4, disabling the use of ro as it's not supported yet - // in the platform. TODO Windows: `(:(?P(?i)ro|rw))?` - // mode (optional) - // - Hopefully self explanatory in comparison to above. - // - Colon is not in the capture group - // -) - -// ParseMountSpec validates the configuration of mount information is valid. -func ParseMountSpec(spec string, volumeDriver string) (*MountPoint, error) { - var specExp = regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) - - // Ensure in platform semantics for matching. The CLI will send in Unix semantics. - match := specExp.FindStringSubmatch(filepath.FromSlash(strings.ToLower(spec))) - - // Must have something back - if len(match) == 0 { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - - // Pull out the sub expressions from the named capture groups - matchgroups := make(map[string]string) - for i, name := range specExp.SubexpNames() { - matchgroups[name] = strings.ToLower(match[i]) - } - - mp := &MountPoint{ - Source: matchgroups["source"], - Destination: matchgroups["destination"], - RW: true, - } - if strings.ToLower(matchgroups["mode"]) == "ro" { - mp.RW = false - } - - // Volumes cannot include an explicitly supplied mode eg c:\path:rw - if mp.Source == "" && mp.Destination != "" && matchgroups["mode"] != "" { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - - // Note: No need to check if destination is absolute as it must be by - // definition of matching the regex. - - if filepath.VolumeName(mp.Destination) == mp.Destination { - // Ensure the destination path, if a drive letter, is not the c drive - if strings.ToLower(mp.Destination) == "c:" { - return nil, derr.ErrorCodeVolumeDestIsC.WithArgs(spec) - } - } else { - // So we know the destination is a path, not drive letter. Clean it up. - mp.Destination = filepath.Clean(mp.Destination) - // Ensure the destination path, if a path, is not the c root directory - if strings.ToLower(mp.Destination) == `c:\` { - return nil, derr.ErrorCodeVolumeDestIsCRoot.WithArgs(spec) - } - } - - // See if the source is a name instead of a host directory - if len(mp.Source) > 0 { - validName, err := IsVolumeNameValid(mp.Source) - if err != nil { - return nil, err - } - if validName { - // OK, so the source is a name. - mp.Name = mp.Source - mp.Source = "" - - // Set the driver accordingly - mp.Driver = volumeDriver - if len(mp.Driver) == 0 { - mp.Driver = DefaultDriverName - } - } else { - // OK, so the source must be a host directory. Make sure it's clean. - mp.Source = filepath.Clean(mp.Source) - } - } - - // Ensure the host path source, if supplied, exists and is a directory - if len(mp.Source) > 0 { - var fi os.FileInfo - var err error - if fi, err = os.Stat(mp.Source); err != nil { - return nil, derr.ErrorCodeVolumeSourceNotFound.WithArgs(mp.Source, err) - } - if !fi.IsDir() { - return nil, derr.ErrorCodeVolumeSourceNotDirectory.WithArgs(mp.Source) - } - } - - logrus.Debugf("MP: Source '%s', Dest '%s', RW %t, Name '%s', Driver '%s'", mp.Source, mp.Destination, mp.RW, mp.Name, mp.Driver) - return mp, nil -} - -// IsVolumeNameValid checks a volume name in a platform specific manner. 
-func IsVolumeNameValid(name string) (bool, error) {
-	nameExp := regexp.MustCompile(`^` + RXName + `$`)
-	if !nameExp.MatchString(name) {
-		return false, nil
-	}
-	nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`)
-	if nameExp.MatchString(name) {
-		return false, derr.ErrorCodeVolumeNameReservedWord.WithArgs(name)
-	}
-	return true, nil
-}
diff --git a/vendor/github.com/docker/engine-api/.travis.yml b/vendor/github.com/docker/engine-api/.travis.yml
new file mode 100644
index 00000000..d085af29
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/.travis.yml
@@ -0,0 +1,9 @@
+---
+  language: go
+  sudo: false
+  notifications:
+    email: false
+  go:
+    - 1.6
+  install: make deps
+  script: make validate && make test
diff --git a/vendor/github.com/docker/engine-api/CHANGELOG.md b/vendor/github.com/docker/engine-api/CHANGELOG.md
new file mode 100644
index 00000000..d7f558ff
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/CHANGELOG.md
@@ -0,0 +1,129 @@
+# Changelog
+
+Items starting with DEPRECATE are important deprecation notices. For more information on the list of deprecated APIs please have a look at https://docs.docker.com/misc/deprecated/ where target removal dates can also be found.
+
+## 0.3.0 (2016-03-22)
+
+### Client
+
+- Add context to every function.
+- Fix issue loading a default TLS CA.
+- Allow to configure the client with a given http.Client.
+- Add support for Windows named pipes.
+- Set default host for Solaris.
+- Add quiet flag for image load.
+- Add ability to hijack connections through a proxy.
+- Correctly set content type for image load.
+- Add support for getting token for login.
+
+### Types
+
+- Add struct for update restart policy.
+- Add human friendly State for container.
+- Use OS specific host when DOCKER_HOST is not set.
+- Rename Status in info to SystemStatus.
+- Add internal flag to network inspect.
+- Add disk quota field to container.
+- Add EnableIPv6 fields.
+- Add Mounts to container.
+- Add cgroup driver to info.
+- Add userns to host config.
+- Remove email from AuthConfig.
+- Make AuthConfig fields optional.
+- Add IO resource settings for Windows.
+- Add storage driver to host config.
+- Update NetworkName to return proper user defined network names.
+- Support joining cgroups by container id.
+- Add KernelMemory to info.
+- Add UsernsMode to container config.
+- Add CPU resource control for Windows.
+- Add AutoRemove to host config.
+- Add Status field to Volume.
+- Add Label to Image, Network and Volume.
+- Add RootFS to container.
+
+## 0.2.3 (2016-02-02)
+
+### Types
+
+- Add missing status field.
+
+## 0.2.2 (2016-01-13)
+
+### Client
+
+- Fix issue configuring response hijacking with TLS enabled.
+
+
+## 0.2.1 (2016-01-12)
+
+### Client
+
+- Fix issue detecting missing images on container creation.
+
+### Types
+
+- Remove invalid json tag in endpoint configuration.
+- Add missing fields in info structure.
+
+## 0.2.0 (2016-01-11)
+
+### Client
+
+- Allow to force network disconnection. (docker 1.10)
+
+### Types
+
+- Add global and local alias configuration to network endpoint.
+- Add network ID to network endpoint.
+- Add IPAM options.
+- Add Seccomp options.
+- Fix issue referencing OOMKillDisable.
+
+
+## 0.1.3 (2016-01-07)
+
+### Client
+
+- Fix issue sending all network configurations for a per network request.
+
+
+## 0.1.2 (2016-01-07)
+
+### Client
+
+- Add interface to represent the API client.
+- Restrict the fields sent to the update endpoint to only those that are used.
+- Send network settings as part of the container create request. (docker 1.10)
+- Send network settings as part of the network connect request. (docker 1.10)
+
+### Types
+
+- Add PidsLimit as part of the host configuration.
+- Add PidsStats to show PID stats.
+- Add graph storage options to host configuration.
+- Add NetworkConfig and EndpointIPAMConfig structs. (docker 1.10)
+
+
+## 0.1.1 (2016-01-06)
+
+### Client
+
+- Delegate shmSize units conversion to the consumer.
+
+### Types
+
+- Add warnings to the volume list response.
+- Fix image build options:
+  * use 0 as default value for shmSize.
+
+
+## 0.1.0 (2016-01-04)
+
+### Client
+
+- Initial API client implementation.
+
+### Types
+
+- Initial API types implementation.
diff --git a/vendor/github.com/docker/engine-api/CONTRIBUTING.md b/vendor/github.com/docker/engine-api/CONTRIBUTING.md
new file mode 100644
index 00000000..926dcc93
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/CONTRIBUTING.md
@@ -0,0 +1,55 @@
+# Contributing to Docker
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE
new file mode 100644
index 00000000..c157bff9
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/engine-api/MAINTAINERS b/vendor/github.com/docker/engine-api/MAINTAINERS new file mode 100644 index 00000000..2bc4fd6d --- /dev/null +++ b/vendor/github.com/docker/engine-api/MAINTAINERS @@ -0,0 +1,45 @@ +# engine-api maintainers file +# +# This file describes who runs the docker/engine-api project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. 
+#
+[Org]
+	[Org."Core maintainers"]
+		people = [
+			"calavera",
+			"dongluochen",
+			"mhbauer",
+			"vdemeester"
+		]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
+	[people.calavera]
+	Name = "David Calavera"
+	Email = "david.calavera@gmail.com"
+	GitHub = "calavera"
+
+	[people.dongluochen]
+	Name = "Dongluo Chen"
+	Email = "dongluo.chen@docker.com"
+	GitHub = "dongluochen"
+
+	[people.mhbauer]
+	Name = "Morgan Bauer"
+	Email = "mbauer@us.ibm.com"
+	GitHub = "mhbauer"
+
+	[people.vdemeester]
+	Name = "Vincent Demeester"
+	Email = "vincent@sbr.pm"
+	GitHub = "vdemeester"
diff --git a/vendor/github.com/docker/engine-api/Makefile b/vendor/github.com/docker/engine-api/Makefile
new file mode 100644
index 00000000..a4a8595a
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/Makefile
@@ -0,0 +1,15 @@
+.PHONY: all deps test validate
+
+all: deps test validate
+
+deps:
+	go get -t ./...
+	go get github.com/golang/lint/golint
+
+test:
+	go test -race -cover ./...
+
+validate:
+	go vet ./...
+	test -z "$$(golint ./... | tee /dev/stderr)"
+	test -z "$$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/engine-api/README.md b/vendor/github.com/docker/engine-api/README.md
new file mode 100644
index 00000000..897dbdf9
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/README.md
@@ -0,0 +1,68 @@
+[![GoDoc](https://godoc.org/github.com/docker/engine-api?status.svg)](https://godoc.org/github.com/docker/engine-api)
+
+# Introduction
+
+Engine-api is a set of Go libraries to implement client and server components compatible with the Docker engine.
+The code was extracted from the [Docker engine](https://github.com/docker/docker) and contributed back as an external library.
+
+## Components
+
+### Client
+
+The client package implements a fully featured http client to interact with the Docker engine. It's modeled after the requirements of the Docker engine CLI, but it can also serve other purposes.
+
+#### Usage
+
+You can use this client package in your applications by creating a new client object. Then use that object to execute operations against the remote server. Follow the example below to see how to list all the containers running in a Docker engine host:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/docker/engine-api/client"
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+func main() {
+	defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
+	cli, err := client.NewClient("unix:///var/run/docker.sock", "v1.22", nil, defaultHeaders)
+	if err != nil {
+		panic(err)
+	}
+
+	options := types.ContainerListOptions{All: true}
+	containers, err := cli.ContainerList(context.Background(), options)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, c := range containers {
+		fmt.Println(c.ID)
+	}
+}
+```
+
+### Types
+
+The types package includes all typed structures that client and server serialize to execute operations.
+
+### Server
+
+The server package includes API endpoints that applications compatible with the Docker engine API can reuse. It also provides useful middlewares and helpers to handle http requests.
+
+This package has not yet been extracted from the Docker engine.
+
+## Developing
+
+engine-api requires some minimal libraries that you can download by running `make deps`.
+
+To run tests, use the command `make test`.
+We use build tags to isolate functions and structures that are only available for testing.
+
+To validate the sources, use the command `make validate`.
+
+## License
+
+engine-api is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/engine-api/appveyor.yml b/vendor/github.com/docker/engine-api/appveyor.yml
new file mode 100644
index 00000000..b31137f3
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/appveyor.yml
@@ -0,0 +1,37 @@
+version: "{build}"
+
+# Source Config
+clone_folder: c:\gopath\src\github.com\docker\engine-api
+
+# Build host
+
+environment:
+  GOPATH: c:\gopath
+  GOVERSION: 1.6
+
+init:
+  - git config --global core.autocrlf input
+
+# Build
+
+install:
+  # Install Go 1.6.
+  - rmdir c:\go /s /q
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
+  - msiexec /i go%GOVERSION%.windows-amd64.msi /q
+  - set Path=c:\go\bin;c:\gopath\bin;%Path%
+  - go version
+  - go env
+
+build: false
+deploy: false
+
+before_test:
+  - go get -t ./...
+  - go get github.com/golang/lint/golint
+
+test_script:
+  - go vet ./...
+  - golint ./...
+  - gofmt -s -l .
+  - go test -race -cover -v -tags=test ./...
diff --git a/vendor/github.com/docker/engine-api/client/client.go b/vendor/github.com/docker/engine-api/client/client.go
new file mode 100644
index 00000000..0716667b
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/client.go
@@ -0,0 +1,135 @@
+package client
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/engine-api/client/transport"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+	// proto holds the client protocol i.e. unix.
+	proto string
+	// addr holds the client address.
+	addr string
+	// basePath holds the path to prepend to the requests.
+	basePath string
+	// transport is the interface to send requests with; it implements transport.Client.
+	transport transport.Client
+	// version of the server to talk to.
+	version string
+	// custom http headers configured by users.
+	customHTTPHeaders map[string]string
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the url to the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// Use DOCKER_CERT_PATH to load the tls certificates from.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
+func NewEnvClient() (*Client, error) {
+	var client *http.Client
+	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+		options := tlsconfig.Options{
+			CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
+			CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
+			KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
+			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+		}
+		tlsc, err := tlsconfig.Client(options)
+		if err != nil {
+			return nil, err
+		}
+
+		client = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsc,
+			},
+		}
+	}
+
+	host := os.Getenv("DOCKER_HOST")
+	if host == "" {
+		host = DefaultDockerHost
+	}
+	return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil)
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It won't send any version information if the version number is empty.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+	proto, addr, basePath, err := ParseHost(host)
+	if err != nil {
+		return nil, err
+	}
+
+	transport, err := transport.NewTransportWithHTTP(proto, addr, client)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		proto:             proto,
+		addr:              addr,
+		basePath:          basePath,
+		transport:         transport,
+		version:           version,
+		customHTTPHeaders: httpHeaders,
+	}, nil
+}
+
+// getAPIPath returns the versioned request path to call the api.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+	var apiPath string
+	if cli.version != "" {
+		v := strings.TrimPrefix(cli.version, "v")
+		apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
+	} else {
+		apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
+	}
+
+	u := &url.URL{
+		Path: apiPath,
+	}
+	if len(query) > 0 {
+		u.RawQuery = query.Encode()
+	}
+	return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+func (cli *Client) ClientVersion() string {
+	return cli.version
+}
+
+// ParseHost verifies that the given host string is valid.
+func ParseHost(host string) (string, string, string, error) {
+	protoAddrParts := strings.SplitN(host, "://", 2)
+	if len(protoAddrParts) == 1 {
+		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+	}
+
+	var basePath string
+	proto, addr := protoAddrParts[0], protoAddrParts[1]
+	if proto == "tcp" {
+		parsed, err := url.Parse("tcp://" + addr)
+		if err != nil {
+			return "", "", "", err
+		}
+		addr = parsed.Host
+		basePath = parsed.Path
+	}
+	return proto, addr, basePath, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/client_darwin.go b/vendor/github.com/docker/engine-api/client/client_darwin.go
new file mode 100644
index 00000000..4b47a178
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/client_darwin.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "tcp://127.0.0.1:2375"
diff --git a/vendor/github.com/docker/engine-api/client/client_unix.go b/vendor/github.com/docker/engine-api/client/client_unix.go
new file mode 100644
index 00000000..572c5f87
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd
+
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/docker/engine-api/client/client_windows.go b/vendor/github.com/docker/engine-api/client/client_windows.go
new file mode 100644
index 00000000..07c0c7a7
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/docker/engine-api/client/container_attach.go b/vendor/github.com/docker/engine-api/client/container_attach.go
new file mode 100644
index 00000000..d87fc655
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_attach.go
@@ -0,0 +1,34 @@
+package client
diff --git a/vendor/github.com/docker/engine-api/client/container_attach.go b/vendor/github.com/docker/engine-api/client/container_attach.go
new file mode 100644
index 00000000..d87fc655
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_attach.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerAttach(ctx context.Context, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+	query := url.Values{}
+	if options.Stream {
+		query.Set("stream", "1")
+	}
+	if options.Stdin {
+		query.Set("stdin", "1")
+	}
+	if options.Stdout {
+		query.Set("stdout", "1")
+	}
+	if options.Stderr {
+		query.Set("stderr", "1")
+	}
+	if options.DetachKeys != "" {
+		query.Set("detachKeys", options.DetachKeys)
+	}
+
+	headers := map[string][]string{"Content-Type": {"text/plain"}}
+	return cli.postHijacked(ctx, "/containers/"+options.ContainerID+"/attach", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_commit.go b/vendor/github.com/docker/engine-api/client/container_commit.go
new file mode 100644
index 00000000..8a6c8993
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_commit.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerCommit applies changes into a container and creates a new tagged image.
+func (cli *Client) ContainerCommit(ctx context.Context, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) {
+	query := url.Values{}
+	query.Set("container", options.ContainerID)
+	query.Set("repo", options.RepositoryName)
+	query.Set("tag", options.Tag)
+	query.Set("comment", options.Comment)
+	query.Set("author", options.Author)
+	for _, change := range options.Changes {
+		query.Add("changes", change)
+	}
+	if !options.Pause {
+		query.Set("pause", "0")
+	}
+
+	var response types.ContainerCommitResponse
+	resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_copy.go b/vendor/github.com/docker/engine-api/client/container_copy.go
new file mode 100644
index 00000000..aaf1f775
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_copy.go
@@ -0,0 +1,97 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+	query := url.Values{}
+	query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+	urlStr := fmt.Sprintf("/containers/%s/archive", containerID)
+	response, err := cli.head(ctx, urlStr, query, nil)
+	if err != nil {
+		return types.ContainerPathStat{}, err
+	}
+	defer ensureReaderClosed(response)
+	return getContainerPathStatFromHeader(response.header)
+}
+
+// CopyToContainer copies content into the container filesystem.
+func (cli *Client) CopyToContainer(ctx context.Context, options types.CopyToContainerOptions) error {
+	query := url.Values{}
+	query.Set("path", filepath.ToSlash(options.Path)) // Normalize the paths used in the API.
+	// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+	if !options.AllowOverwriteDirWithFile {
+		query.Set("noOverwriteDirNonDir", "true")
+	}
+
+	path := fmt.Sprintf("/containers/%s/archive", options.ContainerID)
+
+	response, err := cli.putRaw(ctx, path, query, options.Content, nil)
+	if err != nil {
+		return err
+	}
+	defer ensureReaderClosed(response)
+
+	if response.statusCode != http.StatusOK {
+		return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+	}
+
+	return nil
+}
+
+// CopyFromContainer gets the content from the container and returns it as a Reader
+// to manipulate it in the host. It's up to the caller to close the reader.
+func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
+	query := make(url.Values, 1)
+	query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
+
+	apiPath := fmt.Sprintf("/containers/%s/archive", containerID)
+	response, err := cli.get(ctx, apiPath, query, nil)
+	if err != nil {
+		return nil, types.ContainerPathStat{}, err
+	}
+
+	if response.statusCode != http.StatusOK {
+		return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+	}
+
+	// In order to get the copy behavior right, we need to know information
+	// about both the source and the destination. The response headers include
+	// stat info about the source that we can use in deciding exactly how to
+	// copy it locally. Along with the stat info about the local destination,
+	// we have everything we need to handle the multiple possibilities there
+	// can be when copying a file/dir from one location to another file/dir.
+	stat, err := getContainerPathStatFromHeader(response.header)
+	if err != nil {
+		return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+	}
+	return response.body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
+	var stat types.ContainerPathStat
+
+	encodedStat := header.Get("X-Docker-Container-Path-Stat")
+	statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+	err := json.NewDecoder(statDecoder).Decode(&stat)
+	if err != nil {
+		err = fmt.Errorf("unable to decode container path stat header: %s", err)
+	}
+
+	return stat, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_create.go b/vendor/github.com/docker/engine-api/client/container_create.go
new file mode 100644
index 00000000..98935794
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_create.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strings"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/network"
+	"golang.org/x/net/context"
+)
+
+type configWrapper struct {
+	*container.Config
+	HostConfig       *container.HostConfig
+	NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
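// [Editorial example — not part of the vendored patch.] A sketch of the
// create-then-start flow, assuming imports for context, container and this
// package, and assuming types.ContainerCreateResponse carries the new
// container's ID (the image name here is illustrative).
func exampleRunContainer(ctx context.Context, cli *Client) (string, error) {
	created, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "busybox"},
		nil, // nil host config: daemon defaults
		nil, // nil networking config: daemon defaults
		"")  // empty name: let the daemon pick one
	if err != nil {
		// IsErrImageNotFound (defined later in errors.go) distinguishes a
		// missing image from other failures; pull first in that case.
		return "", err
	}
	// Create only allocates the container; it still has to be started.
	return created.ID, cli.ContainerStart(ctx, created.ID)
}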
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) {
+	var response types.ContainerCreateResponse
+	query := url.Values{}
+	if containerName != "" {
+		query.Set("name", containerName)
+	}
+
+	body := configWrapper{
+		Config:           config,
+		HostConfig:       hostConfig,
+		NetworkingConfig: networkingConfig,
+	}
+
+	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+	if err != nil {
+		if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+			return response, imageNotFoundError{config.Image}
+		}
+		return response, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_diff.go b/vendor/github.com/docker/engine-api/client/container_diff.go
new file mode 100644
index 00000000..f4bb3a46
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_diff.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) {
+	var changes []types.ContainerChange
+
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+	if err != nil {
+		return changes, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&changes)
+	ensureReaderClosed(serverResp)
+	return changes, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_exec.go b/vendor/github.com/docker/engine-api/client/container_exec.go
new file mode 100644
index 00000000..159c9dfd
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_exec.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
+	var response types.ContainerExecCreateResponse
+	resp, err := cli.post(ctx, "/containers/"+config.Container+"/exec", nil, config, nil)
+	if err != nil {
+		return response, err
+	}
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+	headers := map[string][]string{"Content-Type": {"application/json"}}
+	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+	var response types.ContainerExecInspect
+	resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_export.go b/vendor/github.com/docker/engine-api/client/container_export.go
new file mode 100644
index 00000000..1925113e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_export.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return serverResp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_inspect.go b/vendor/github.com/docker/engine-api/client/container_inspect.go
new file mode 100644
index 00000000..afd71eef
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_inspect.go
@@ -0,0 +1,65 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return types.ContainerJSON{}, containerNotFoundError{containerID}
+		}
+		return types.ContainerJSON{}, err
+	}
+
+	var response types.ContainerJSON
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
+
+// ContainerInspectWithRaw returns the container information and its raw representation.
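// [Editorial example — not part of the vendored patch.] A sketch of probing
// for a container while distinguishing "not found" from transport or daemon
// errors, assuming a context import and a constructed *Client.
func exampleContainerExists(ctx context.Context, cli *Client, id string) (bool, error) {
	_, err := cli.ContainerInspect(ctx, id)
	if err != nil {
		if IsErrContainerNotFound(err) {
			return false, nil // daemon reachable, container absent
		}
		return false, err // transport or daemon error
	}
	return true, nil
}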
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
+	query := url.Values{}
+	if getSize {
+		query.Set("size", "1")
+	}
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
+		}
+		return types.ContainerJSON{}, nil, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	body, err := ioutil.ReadAll(serverResp.body)
+	if err != nil {
+		return types.ContainerJSON{}, nil, err
+	}
+
+	var response types.ContainerJSON
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&response)
+	return response, body, err
+}
+
+func (cli *Client) containerInspectWithResponse(ctx context.Context, containerID string, query url.Values) (types.ContainerJSON, *serverResponse, error) {
+	// Pass the caller's query through; the original left this parameter unused.
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+	if err != nil {
+		return types.ContainerJSON{}, serverResp, err
+	}
+
+	var response types.ContainerJSON
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	return response, serverResp, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_kill.go b/vendor/github.com/docker/engine-api/client/container_kill.go
new file mode 100644
index 00000000..29f80c73
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_kill.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
+	query := url.Values{}
+	query.Set("signal", signal)
+
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_list.go b/vendor/github.com/docker/engine-api/client/container_list.go
new file mode 100644
index 00000000..573f41d5
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_list.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strconv"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	"golang.org/x/net/context"
+)
+
+// ContainerList returns the list of containers in the docker host.
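// [Editorial example — not part of the vendored patch.] A sketch of listing
// only exited containers through the filters package, assuming the same
// context, types and filters imports as this file.
func exampleListExited(ctx context.Context, cli *Client) ([]types.Container, error) {
	args := filters.NewArgs()
	args.Add("status", "exited")
	return cli.ContainerList(ctx, types.ContainerListOptions{
		All:    true, // include stopped containers
		Limit:  -1,   // -1 leaves the "limit" query parameter unset (see below)
		Filter: args,
	})
}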
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+	query := url.Values{}
+
+	if options.All {
+		query.Set("all", "1")
+	}
+
+	if options.Limit != -1 {
+		query.Set("limit", strconv.Itoa(options.Limit))
+	}
+
+	if options.Since != "" {
+		query.Set("since", options.Since)
+	}
+
+	if options.Before != "" {
+		query.Set("before", options.Before)
+	}
+
+	if options.Size {
+		query.Set("size", "1")
+	}
+
+	if options.Filter.Len() > 0 {
+		filterJSON, err := filters.ToParam(options.Filter)
+		if err != nil {
+			return nil, err
+		}
+
+		query.Set("filters", filterJSON)
+	}
+
+	resp, err := cli.get(ctx, "/containers/json", query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var containers []types.Container
+	err = json.NewDecoder(resp.body).Decode(&containers)
+	ensureReaderClosed(resp)
+	return containers, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_logs.go b/vendor/github.com/docker/engine-api/client/container_logs.go
new file mode 100644
index 00000000..47c60ee3
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_logs.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"io"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	timetypes "github.com/docker/engine-api/types/time"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) ContainerLogs(ctx context.Context, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+	query := url.Values{}
+	if options.ShowStdout {
+		query.Set("stdout", "1")
+	}
+
+	if options.ShowStderr {
+		query.Set("stderr", "1")
+	}
+
+	if options.Since != "" {
+		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+		if err != nil {
+			return nil, err
+		}
+		query.Set("since", ts)
+	}
+
+	if options.Timestamps {
+		query.Set("timestamps", "1")
+	}
+
+	if options.Follow {
+		query.Set("follow", "1")
+	}
+	query.Set("tail", options.Tail)
+
+	resp, err := cli.get(ctx, "/containers/"+options.ContainerID+"/logs", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_pause.go b/vendor/github.com/docker/engine-api/client/container_pause.go
new file mode 100644
index 00000000..412067a7
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_pause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerPause pauses the main process of a given container without terminating it.
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_remove.go b/vendor/github.com/docker/engine-api/client/container_remove.go
new file mode 100644
index 00000000..56796231
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_remove.go
@@ -0,0 +1,27 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerRemove kills and removes a container from the docker host.
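// [Editorial example — not part of the vendored patch.] A sketch of forced
// removal including anonymous volumes, assuming context and types imports.
func exampleForceRemove(ctx context.Context, cli *Client, id string) error {
	return cli.ContainerRemove(ctx, types.ContainerRemoveOptions{
		ContainerID:   id,
		RemoveVolumes: true, // also delete volumes created for the container
		Force:         true, // kill the container first if it is running
	})
}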
+func (cli *Client) ContainerRemove(ctx context.Context, options types.ContainerRemoveOptions) error {
+	query := url.Values{}
+	if options.RemoveVolumes {
+		query.Set("v", "1")
+	}
+	if options.RemoveLinks {
+		query.Set("link", "1")
+	}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/containers/"+options.ContainerID, query, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_rename.go b/vendor/github.com/docker/engine-api/client/container_rename.go
new file mode 100644
index 00000000..0e718da7
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+	query := url.Values{}
+	query.Set("name", newContainerName)
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_resize.go b/vendor/github.com/docker/engine-api/client/container_resize.go
new file mode 100644
index 00000000..07820174
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_resize.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/containers/"+options.ID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/exec/"+options.ID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error {
+	query := url.Values{}
+	query.Set("h", strconv.Itoa(height))
+	query.Set("w", strconv.Itoa(width))
+
+	resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_restart.go b/vendor/github.com/docker/engine-api/client/container_restart.go
new file mode 100644
index 00000000..1c74b18c
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_restart.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout int) error {
+	query := url.Values{}
+	query.Set("t", strconv.Itoa(timeout))
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_start.go b/vendor/github.com/docker/engine-api/client/container_start.go
new file mode 100644
index 00000000..12a97942
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_start.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/start", nil, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_stats.go b/vendor/github.com/docker/engine-api/client/container_stats.go
new file mode 100644
index 00000000..2cc67c3a
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_stats.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerStats returns near realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("stream", "0")
+	if stream {
+		query.Set("stream", "1")
+	}
+
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_stop.go b/vendor/github.com/docker/engine-api/client/container_stop.go
new file mode 100644
index 00000000..34d78629
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_stop.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerStop stops a container without terminating the process.
+// The request blocks until the container stops or the timeout expires.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout int) error {
+	query := url.Values{}
+	query.Set("t", strconv.Itoa(timeout))
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_top.go b/vendor/github.com/docker/engine-api/client/container_top.go
new file mode 100644
index 00000000..5ad926ae
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_top.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strings"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerTop shows process information from within a container.
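// [Editorial example — not part of the vendored patch.] A sketch of taking a
// single (non-streaming) sample from ContainerStats, defined just above. The
// payload is decoded generically here because the concrete stats struct lives
// outside the files in this patch; assumes context and encoding/json imports.
func exampleStatsSample(ctx context.Context, cli *Client, id string) (map[string]interface{}, error) {
	body, err := cli.ContainerStats(ctx, id, false) // stream=false: one sample
	if err != nil {
		return nil, err
	}
	defer body.Close() // the caller owns the ReadCloser
	var sample map[string]interface{}
	err = json.NewDecoder(body).Decode(&sample)
	return sample, err
}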
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) {
+	var response types.ContainerProcessList
+	query := url.Values{}
+	if len(arguments) > 0 {
+		query.Set("ps_args", strings.Join(arguments, " "))
+	}
+
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_unpause.go b/vendor/github.com/docker/engine-api/client/container_unpause.go
new file mode 100644
index 00000000..5c762112
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_unpause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerUnpause resumes the process execution within a container.
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_update.go b/vendor/github.com/docker/engine-api/client/container_update.go
new file mode 100644
index 00000000..a5a1826d
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_update.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+	"github.com/docker/engine-api/types/container"
+	"golang.org/x/net/context"
+)
+
+// ContainerUpdate updates resources of a container.
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_wait.go b/vendor/github.com/docker/engine-api/client/container_wait.go
new file mode 100644
index 00000000..ca8c443b
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_wait.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerWait pauses execution until a container exits.
+// It returns the container's exit status code.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+	if err != nil {
+		return -1, err
+	}
+	defer ensureReaderClosed(resp)
+
+	var res types.ContainerWaitResponse
+	if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+		return -1, err
+	}
+
+	return res.StatusCode, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/errors.go b/vendor/github.com/docker/engine-api/client/errors.go
new file mode 100644
index 00000000..9bcc78eb
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/errors.go
@@ -0,0 +1,94 @@
+package client
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrConnectionFailed is an error raised when the connection between the client and the server fails.
+var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
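// [Editorial example — not part of the vendored patch.] A sketch showing how
// callers are expected to branch on the typed errors defined below: a local
// image-presence check that maps imageNotFoundError to a boolean, assuming
// context and this package's ImageInspectWithRaw.
func exampleHasImage(ctx context.Context, cli *Client, ref string) (bool, error) {
	_, _, err := cli.ImageInspectWithRaw(ctx, ref, false)
	if err != nil {
		if IsErrImageNotFound(err) {
			return false, nil // daemon reachable, image absent
		}
		return false, err
	}
	return true, nil
}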
+type imageNotFoundError struct {
+	imageID string
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (i imageNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such image: %s", i.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+	_, ok := err.(imageNotFoundError)
+	return ok
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+	containerID string
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+	_, ok := err.(containerNotFoundError)
+	return ok
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+	networkID string
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
+func IsErrNetworkNotFound(err error) bool {
+	_, ok := err.(networkNotFoundError)
+	return ok
+}
+
+// volumeNotFoundError implements an error returned when a volume is not in the docker host.
+type volumeNotFoundError struct {
+	volumeID string
+}
+
+// Error returns a string representation of a volumeNotFoundError
+func (e volumeNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+}
+
+// IsErrVolumeNotFound returns true if the error is caused
+// when a volume is not found in the docker host.
+func IsErrVolumeNotFound(err error) bool {
+	_, ok := err.(volumeNotFoundError)
+	return ok
+}
+
+// unauthorizedError represents an authorization error in a remote registry.
+type unauthorizedError struct {
+	cause error
+}
+
+// Error returns a string representation of an unauthorizedError
+func (u unauthorizedError) Error() string {
+	return u.cause.Error()
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// when the remote registry authentication fails.
+func IsErrUnauthorized(err error) bool {
+	_, ok := err.(unauthorizedError)
+	return ok
+}
diff --git a/vendor/github.com/docker/engine-api/client/events.go b/vendor/github.com/docker/engine-api/client/events.go
new file mode 100644
index 00000000..e379ce0a
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/events.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"io"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	timetypes "github.com/docker/engine-api/types/time"
+)
+
+// Events returns a stream of events in the daemon in a ReadCloser.
+// It's up to the caller to close the stream.
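// [Editorial example — not part of the vendored patch.] A sketch of consuming
// the raw event stream for container events from the last ten minutes. Events
// are decoded generically because the event message type is defined outside
// the files in this patch; assumes context, encoding/json, io, types and
// filters imports.
func exampleWatchContainerEvents(ctx context.Context, cli *Client) error {
	args := filters.NewArgs()
	args.Add("type", "container")
	body, err := cli.Events(ctx, types.EventsOptions{Since: "10m", Filters: args})
	if err != nil {
		return err
	}
	defer body.Close() // the caller owns the stream
	dec := json.NewDecoder(body)
	for {
		var ev map[string]interface{}
		if err := dec.Decode(&ev); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		_ = ev["status"] // e.g. "start", "die", ...
	}
}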
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) {
+	query := url.Values{}
+	ref := time.Now()
+
+	if options.Since != "" {
+		ts, err := timetypes.GetTimestamp(options.Since, ref)
+		if err != nil {
+			return nil, err
+		}
+		query.Set("since", ts)
+	}
+	if options.Until != "" {
+		ts, err := timetypes.GetTimestamp(options.Until, ref)
+		if err != nil {
+			return nil, err
+		}
+		query.Set("until", ts)
+	}
+	if options.Filters.Len() > 0 {
+		filterJSON, err := filters.ToParam(options.Filters)
+		if err != nil {
+			return nil, err
+		}
+		query.Set("filters", filterJSON)
+	}
+
+	serverResponse, err := cli.get(ctx, "/events", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return serverResponse.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/hijack.go b/vendor/github.com/docker/engine-api/client/hijack.go
new file mode 100644
index 00000000..2fd46b6c
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/hijack.go
@@ -0,0 +1,174 @@
+package client
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/http/httputil"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/go-connections/sockets"
+	"golang.org/x/net/context"
+)
+
+// tlsClientCon holds tls information and a dialed connection.
+type tlsClientCon struct {
+	*tls.Conn
+	rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+	// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+	// on its underlying connection.
+	if conn, ok := c.rawConn.(types.CloseWriter); ok {
+		return conn.CloseWrite()
+	}
+	return nil
+}
+
+// postHijacked sends a POST request and hijacks the connection.
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+	bodyEncoded, err := encodeData(body)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+
+	req, err := cli.newRequest("POST", path, query, bodyEncoded, headers)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+	req.Host = cli.addr
+
+	req.Header.Set("Connection", "Upgrade")
+	req.Header.Set("Upgrade", "tcp")
+
+	conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig())
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+		}
+		return types.HijackedResponse{}, err
+	}
+
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prevent
+	// ECONNTIMEOUT unless the socket connection truly is broken.
+	if tcpConn, ok := conn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	clientconn := httputil.NewClientConn(conn, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	clientconn.Do(req)
+
+	rwc, br := clientconn.Hijack()
+
+	return types.HijackedResponse{Conn: rwc, Reader: br}, nil
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+	if err != nil {
+		return nil, err
+	}
+
+	rawConn, err := proxyDialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prevent
+	// ECONNTIMEOUT unless the socket connection truly is broken.
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		c := *config
+		c.ServerName = hostname
+		config = &c
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is Docker's difference from the standard crypto/tls package: we
+	// return a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+	if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+		// Notice this isn't Go's standard tls.Dial function
+		return tlsDial(proto, addr, tlsConfig)
+	}
+	if proto == "npipe" {
+		return sockets.DialPipe(addr, 32*time.Second)
+	}
+	return net.Dial(proto, addr)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_build.go b/vendor/github.com/docker/engine-api/client/image_build.go
new file mode 100644
index 00000000..1612c6a1
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_build.go
@@ -0,0 +1,134 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+)
+
+var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements io.ReadCloser; it's up to the caller to
+// close it.
+func (cli *Client) ImageBuild(ctx context.Context, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+	query, err := imageBuildOptionsToQuery(options)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(options.AuthConfigs)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+	headers.Set("Content-Type", "application/tar")
+
+	serverResp, err := cli.postRaw(ctx, "/build", query, options.Context, headers)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	osType := getDockerOS(serverResp.header.Get("Server"))
+
+	return types.ImageBuildResponse{
+		Body:   serverResp.body,
+		OSType: osType,
+	}, nil
+}
+
+func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+	query := url.Values{
+		"t": options.Tags,
+	}
+	if options.SuppressOutput {
+		query.Set("q", "1")
+	}
+	if options.RemoteContext != "" {
+		query.Set("remote", options.RemoteContext)
+	}
+	if options.NoCache {
+		query.Set("nocache", "1")
+	}
+	if options.Remove {
+		query.Set("rm", "1")
+	} else {
+		query.Set("rm", "0")
+	}
+
+	if options.ForceRemove {
+		query.Set("forcerm", "1")
+	}
+
+	if options.PullParent {
+		query.Set("pull", "1")
+	}
+
+	if !container.Isolation.IsDefault(options.Isolation) {
+		query.Set("isolation", string(options.Isolation))
+	}
+
+	query.Set("cpusetcpus", options.CPUSetCPUs)
+	query.Set("cpusetmems", options.CPUSetMems)
+	query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+	query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+	query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+	query.Set("memory", strconv.FormatInt(options.Memory, 10))
+	query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+	query.Set("cgroupparent", options.CgroupParent)
+	query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+	query.Set("dockerfile", options.Dockerfile)
+
+	ulimitsJSON, err := json.Marshal(options.Ulimits)
+	if err != nil {
+		return query, err
+	}
+	query.Set("ulimits", string(ulimitsJSON))
+
+	buildArgsJSON, err := json.Marshal(options.BuildArgs)
+	if err != nil {
+		return query, err
+	}
+	query.Set("buildargs", string(buildArgsJSON))
+
+	labelsJSON, err := json.Marshal(options.Labels)
+	if err != nil {
+		return query, err
+	}
+	query.Set("labels", string(labelsJSON))
+	return query, nil
+}
+
+func getDockerOS(serverHeader string) string {
+	var osType string
+	matches := headerRegexp.FindStringSubmatch(serverHeader)
+	if len(matches) > 0 {
+		osType = matches[1]
+	}
+	return osType
+}
+
+// convertKVStringsToMap converts ["key=value"] to {"key":"value"}
+func convertKVStringsToMap(values []string) map[string]string {
+	result := make(map[string]string, len(values))
+	for _, value := range values {
+		kv := strings.SplitN(value, "=", 2)
+		if len(kv) == 1 {
+			result[kv[0]] = ""
+		} else {
+			result[kv[0]] = kv[1]
+		}
+	}
+
+	return result
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_create.go b/vendor/github.com/docker/engine-api/client/image_create.go
new file mode 100644
index 00000000..1ec1f9d0
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_create.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, options types.ImageCreateOptions) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("fromImage", options.Parent)
+	query.Set("tag", options.Tag)
+	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_history.go b/vendor/github.com/docker/engine-api/client/image_history.go
new file mode 100644
index 00000000..b2840b5e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) {
+	var history []types.ImageHistory
+	serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+	if err != nil {
+		return history, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&history)
+	ensureReaderClosed(serverResp)
+	return history, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_import.go b/vendor/github.com/docker/engine-api/client/image_import.go
new file mode 100644
index 00000000..48e2c951
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_import.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, options types.ImageImportOptions) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("fromSrc", options.SourceName)
+	query.Set("repo", options.RepositoryName)
+	query.Set("tag", options.Tag)
+	query.Set("message", options.Message)
+	for _, change := range options.Changes {
+		query.Add("changes", change)
+	}
+
+	resp, err := cli.postRaw(ctx, "/images/create", query, options.Source, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_inspect.go b/vendor/github.com/docker/engine-api/client/image_inspect.go
new file mode 100644
index 00000000..761a994c
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageInspectWithRaw returns the image information and its raw representation.
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) {
+	query := url.Values{}
+	if getSize {
+		query.Set("size", "1")
+	}
+	serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return types.ImageInspect{}, nil, imageNotFoundError{imageID}
+		}
+		return types.ImageInspect{}, nil, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	body, err := ioutil.ReadAll(serverResp.body)
+	if err != nil {
+		return types.ImageInspect{}, nil, err
+	}
+
+	var response types.ImageInspect
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&response)
+	return response, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_list.go b/vendor/github.com/docker/engine-api/client/image_list.go
new file mode 100644
index 00000000..347810e6
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_list.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	"golang.org/x/net/context"
+)
+
+// ImageList returns a list of images in the docker host.
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) {
+	var images []types.Image
+	query := url.Values{}
+
+	if options.Filters.Len() > 0 {
+		filterJSON, err := filters.ToParam(options.Filters)
+		if err != nil {
+			return images, err
+		}
+		query.Set("filters", filterJSON)
+	}
+	if options.MatchName != "" {
+		// FIXME rename this parameter, to not be confused with the filters flag
+		query.Set("filter", options.MatchName)
+	}
+	if options.All {
+		query.Set("all", "1")
+	}
+
+	serverResp, err := cli.get(ctx, "/images/json", query, nil)
+	if err != nil {
+		return images, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&images)
+	ensureReaderClosed(serverResp)
+	return images, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_load.go b/vendor/github.com/docker/engine-api/client/image_load.go
new file mode 100644
index 00000000..84ee19c3
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_load.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ImageLoad loads an image in the docker host from the client host.
+// It's up to the caller to close the io.ReadCloser returned by
+// this function.
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
+	v := url.Values{}
+	v.Set("quiet", "0")
+	if quiet {
+		v.Set("quiet", "1")
+	}
+	headers := map[string][]string{"Content-Type": {"application/x-tar"}}
+	resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
+	if err != nil {
+		return types.ImageLoadResponse{}, err
+	}
+	return types.ImageLoadResponse{
+		Body: resp.body,
+		JSON: resp.header.Get("Content-Type") == "application/json",
+	}, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_pull.go b/vendor/github.com/docker/engine-api/client/image_pull.go
new file mode 100644
index 00000000..09044376
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_pull.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ImagePull requests the docker host to pull an image from a remote registry.
+// It executes the privileged function if the operation is unauthorized
+// and it tries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePull(ctx context.Context, options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("fromImage", options.ImageID)
+	if options.Tag != "" {
+		query.Set("tag", options.Tag)
+	}
+
+	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized {
+		newAuthHeader, privilegeErr := privilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_push.go b/vendor/github.com/docker/engine-api/client/image_push.go
new file mode 100644
index 00000000..ca2cb43b
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_push.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// It executes the privileged function if the operation is unauthorized
+// and it tries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
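// [Editorial example — not part of the vendored patch.] A sketch of the
// retry-on-unauthorized contract used by ImagePull and ImagePush: the
// privilege function returns a fresh base64-encoded registry auth header when
// the first attempt is rejected with a 401. Assumes context, io, io/ioutil and
// types imports; the credential refresh is a hypothetical placeholder.
func examplePush(ctx context.Context, cli *Client, auth string) error {
	refreshAuth := func() (string, error) {
		// hypothetical: prompt the user or re-read credentials here
		return auth, nil
	}
	body, err := cli.ImagePush(ctx,
		types.ImagePushOptions{ImageID: "example/app", Tag: "latest", RegistryAuth: auth},
		refreshAuth)
	if err != nil {
		return err
	}
	defer body.Close()
	_, err = io.Copy(ioutil.Discard, body) // drain the progress stream
	return err
}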
+func (cli *Client) ImagePush(ctx context.Context, options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("tag", options.Tag)
+
+	resp, err := cli.tryImagePush(ctx, options.ImageID, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized {
+		newAuthHeader, privilegeErr := privilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImagePush(ctx, options.ImageID, query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_remove.go b/vendor/github.com/docker/engine-api/client/image_remove.go
new file mode 100644
index 00000000..d7e71c89
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_remove.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageRemove removes an image from the docker host.
+func (cli *Client) ImageRemove(ctx context.Context, options types.ImageRemoveOptions) ([]types.ImageDelete, error) {
+	query := url.Values{}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+	if !options.PruneChildren {
+		query.Set("noprune", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/images/"+options.ImageID, query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var dels []types.ImageDelete
+	err = json.NewDecoder(resp.body).Decode(&dels)
+	ensureReaderClosed(resp)
+	return dels, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_save.go b/vendor/github.com/docker/engine-api/client/image_save.go
new file mode 100644
index 00000000..c0feb3ed
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_save.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+// It's up to the caller to store the images and close the stream.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
+	query := url.Values{
+		"names": imageIDs,
+	}
+
+	resp, err := cli.get(ctx, "/images/get", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_search.go b/vendor/github.com/docker/engine-api/client/image_search.go
new file mode 100644
index 00000000..ebe9dc0b
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_search.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/registry"
+	"golang.org/x/net/context"
+)
+
+// ImageSearch asks the docker host to search for a term in a remote registry.
+// The list of results is not sorted in any fashion.
+func (cli *Client) ImageSearch(ctx context.Context, options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error) {
+	var results []registry.SearchResult
+	query := url.Values{}
+	query.Set("term", options.Term)
+
+	resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized {
+		newAuthHeader, privilegeErr := privilegeFunc()
+		if privilegeErr != nil {
+			return results, privilegeErr
+		}
+		resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
+	}
+	if err != nil {
+		return results, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&results)
+	ensureReaderClosed(resp)
+	return results, err
+}
+
+func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.get(ctx, "/images/search", query, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_tag.go b/vendor/github.com/docker/engine-api/client/image_tag.go
new file mode 100644
index 00000000..20feda38
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_tag.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageTag tags an image in the docker host
+func (cli *Client) ImageTag(ctx context.Context, options types.ImageTagOptions) error {
+	query := url.Values{}
+	query.Set("repo", options.RepositoryName)
+	query.Set("tag", options.Tag)
+	if options.Force {
+		query.Set("force", "1")
+	}
+
+	resp, err := cli.post(ctx, "/images/"+options.ImageID+"/tag", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/info.go b/vendor/github.com/docker/engine-api/client/info.go
new file mode 100644
index 00000000..ff0958d6
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/info.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// Info returns information about the docker server.
+func (cli *Client) Info(ctx context.Context) (types.Info, error) {
+	var info types.Info
+	serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
+	if err != nil {
+		return info, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
+		return info, fmt.Errorf("Error reading remote info: %v", err)
+	}
+
+	return info, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/interface.go b/vendor/github.com/docker/engine-api/client/interface.go
new file mode 100644
index 00000000..e95ed555
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/interface.go
@@ -0,0 +1,78 @@
+package client
+
+import (
+	"io"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/filters"
+	"github.com/docker/engine-api/types/network"
+	"github.com/docker/engine-api/types/registry"
+)
+
+// APIClient is the interface that clients talking to a docker server must implement.
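// [Editorial example — not part of the vendored patch.] Accepting the
// APIClient interface below rather than *Client keeps callers testable: a
// fake implementation can stand in for the real daemon in unit tests. A
// sketch, assuming context and types imports.
func exampleCountContainers(ctx context.Context, api APIClient) (int, error) {
	containers, err := api.ContainerList(ctx, types.ContainerListOptions{Limit: -1})
	if err != nil {
		return 0, err
	}
	return len(containers), nil
}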
+type APIClient interface {
+	ClientVersion() string
+	ContainerAttach(ctx context.Context, options types.ContainerAttachOptions) (types.HijackedResponse, error)
+	ContainerCommit(ctx context.Context, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
+	ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)
+	ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error)
+	ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
+	ContainerExecCreate(ctx context.Context, config types.ExecConfig) (types.ContainerExecCreateResponse, error)
+	ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
+	ContainerExecResize(ctx context.Context, options types.ResizeOptions) error
+	ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
+	ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error)
+	ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
+	ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error)
+	ContainerKill(ctx context.Context, containerID, signal string) error
+	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+	ContainerLogs(ctx context.Context, options types.ContainerLogsOptions) (io.ReadCloser, error)
+	ContainerPause(ctx context.Context, containerID string) error
+	ContainerRemove(ctx context.Context, options types.ContainerRemoveOptions) error
+	ContainerRename(ctx context.Context, containerID, newContainerName string) error
+	ContainerResize(ctx context.Context, options types.ResizeOptions) error
+	ContainerRestart(ctx context.Context, containerID string, timeout int) error
+	ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error)
+	ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
+	ContainerStart(ctx context.Context, containerID string) error
+	ContainerStop(ctx context.Context, containerID string, timeout int) error
+	ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error)
+	ContainerUnpause(ctx context.Context, containerID string) error
+	ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error
+	ContainerWait(ctx context.Context, containerID string) (int, error)
+	CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
+	CopyToContainer(ctx context.Context, options types.CopyToContainerOptions) error
+	Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error)
+	ImageBuild(ctx context.Context, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
+	ImageCreate(ctx context.Context, options types.ImageCreateOptions) (io.ReadCloser, error)
+	ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error)
+	ImageImport(ctx context.Context, options types.ImageImportOptions) (io.ReadCloser, error)
+	ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error)
+	ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error)
context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) + ImagePush(ctx context.Context, options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) + ImageRemove(ctx context.Context, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, options types.ImageTagOptions) error + Info(ctx context.Context) (types.Info, error) + NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error + NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) + ServerVersion(ctx context.Context) (types.Version, error) + VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) + VolumeRemove(ctx context.Context, volumeID string) error +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/engine-api/client/login.go b/vendor/github.com/docker/engine-api/client/login.go new file mode 100644 index 00000000..482f9478 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/login.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp != nil && resp.statusCode == http.StatusUnauthorized { + return types.AuthResponse{}, unauthorizedError{err} + } + if err != nil { + return types.AuthResponse{}, err + } + + var response types.AuthResponse + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/engine-api/client/network_connect.go b/vendor/github.com/docker/engine-api/client/network_connect.go new file mode 100644 index 00000000..9a402a3e --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host.
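+// A usage sketch (illustrative only; "webnet" and "c1" are made-up IDs): +// +//	err := cli.NetworkConnect(ctx, "webnet", "c1", &network.EndpointSettings{})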
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/engine-api/client/network_create.go b/vendor/github.com/docker/engine-api/client/network_create.go new file mode 100644 index 00000000..2c41ad7e --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/network_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, options, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/engine-api/client/network_disconnect.go b/vendor/github.com/docker/engine-api/client/network_disconnect.go new file mode 100644 index 00000000..a3e33672 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/network_disconnect.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/engine-api/client/network_inspect.go b/vendor/github.com/docker/engine-api/client/network_inspect.go new file mode 100644 index 00000000..4f81e5ce --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/network_inspect.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + "net/http" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { + var networkResource types.NetworkResource + resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, networkNotFoundError{networkID} + } + return networkResource, err + } + err = json.NewDecoder(resp.body).Decode(&networkResource) + ensureReaderClosed(resp) + return networkResource, err +} diff --git a/vendor/github.com/docker/engine-api/client/network_list.go b/vendor/github.com/docker/engine-api/client/network_list.go new file mode 100644 index 00000000..813109c1 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host.
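+// A usage sketch (illustrative; the "driver" filter key is borrowed from the docker CLI, not defined here): +// +//	f := filters.NewArgs() +//	f.Add("driver", "bridge") +//	nets, err := cli.NetworkList(ctx, types.NetworkListOptions{Filters: f})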
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/vendor/github.com/docker/engine-api/client/network_remove.go b/vendor/github.com/docker/engine-api/client/network_remove.go new file mode 100644 index 00000000..6bd67489 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/engine-api/client/privileged.go b/vendor/github.com/docker/engine-api/client/privileged.go new file mode 100644 index 00000000..945f18ce --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/privileged.go @@ -0,0 +1,9 @@ +package client + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) diff --git a/vendor/github.com/docker/engine-api/client/request.go b/vendor/github.com/docker/engine-api/client/request.go new file mode 100644 index 00000000..4ad2d24b --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/request.go @@ -0,0 +1,180 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/docker/engine-api/client/transport/cancellable" + "golang.org/x/net/context" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific go context.
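+// When obj is non-nil it is JSON-encoded by sendRequest below, which also sets the Content-Type: application/json header.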
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, obj, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { + return cli.sendClientRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, obj, headers) +} + +// putRaw sends an http request to the docker API using the method PUT, without JSON-encoding the body. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { + return cli.sendClientRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { + var body io.Reader + + if obj != nil { + var err error + body, err = encodeData(obj) + if err != nil { + return nil, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + } + + return cli.sendClientRequest(ctx, method, path, query, body, headers) +} + +func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { + serverResp := &serverResponse{ + body: nil, + statusCode: -1, + } + + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := cli.newRequest(method, path, query, body, headers) + if err != nil { + return serverResp, err + } + req.URL.Host = cli.addr + req.URL.Scheme = cli.transport.Scheme() + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + + resp, err := cancellable.Do(ctx, cli.transport, req) + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + + if err != nil { + if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrConnectionFailed + } + + if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + if cli.transport.Secure() && strings.Contains(err.Error(), "remote error: bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled.
Please check your TLS client certification settings: %v", err) + } + + return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest(method, apiPath, body) + if err != nil { + return nil, err + } + + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // so the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + + return req, nil +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response *serverResponse) { + if response != nil && response.body != nil { + response.body.Close() + } +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go new file mode 100644 index 00000000..11dff600 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package cancellable + +import ( + "net/http" + + "github.com/docker/engine-api/client/transport" +) + +func canceler(client transport.Sender, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go new file mode 100644 index 00000000..8ff2845c --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go @@ -0,0 +1,27 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !go1.5 + +package cancellable + +import ( + "net/http" + + "github.com/docker/engine-api/client/transport" +) + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client transport.Sender, req *http.Request) func() { + rc, ok := client.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go new file mode 100644 index 00000000..526feb0f --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go @@ -0,0 +1,113 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cancellable provides helper functions to cancel http requests. +package cancellable + +import ( + "io" + "net/http" + + "github.com/docker/engine-api/client/transport" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +// +// FORK INFORMATION: +// +// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by +// taking a Sender interface rather than a *http.Client directly. That allows us to use +// this function with mocked clients and hijacked connections. +func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil && r.resp.Body != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = &notifyingReader{resp.Body, c} + + return resp, nil +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser.
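+// Do (above) wraps the response body in a notifyingReader so the goroutine watching ctx.Done() can exit once the body is closed or a read fails.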
+type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/github.com/docker/engine-api/client/transport/client.go b/vendor/github.com/docker/engine-api/client/transport/client.go new file mode 100644 index 00000000..13d4b3ab --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/transport/client.go @@ -0,0 +1,47 @@ +package transport + +import ( + "crypto/tls" + "net/http" +) + +// Sender is an interface that clients must implement +// to be able to send requests to a remote connection. +type Sender interface { + // Do sends request to a remote endpoint. + Do(*http.Request) (*http.Response, error) +} + +// Client is an interface that abstracts all remote connections. +type Client interface { + Sender + // Secure tells whether the connection is secure or not. + Secure() bool + // Scheme returns the connection protocol the client uses. + Scheme() string + // TLSConfig returns any TLS configuration the client uses. + TLSConfig() *tls.Config +} + +// tlsInfo holds information about the TLS configuration. +type tlsInfo struct { + tlsConfig *tls.Config +} + +// TLSConfig returns the TLS configuration. +func (t *tlsInfo) TLSConfig() *tls.Config { + return t.tlsConfig +} + +// Scheme returns protocol scheme to use. +func (t *tlsInfo) Scheme() string { + if t.tlsConfig != nil { + return "https" + } + return "http" +} + +// Secure returns true if there is a TLS configuration. +func (t *tlsInfo) Secure() bool { + return t.tlsConfig != nil +} diff --git a/vendor/github.com/docker/engine-api/client/transport/transport.go b/vendor/github.com/docker/engine-api/client/transport/transport.go new file mode 100644 index 00000000..ff28af18 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/transport/transport.go @@ -0,0 +1,57 @@ +// Package transport provides functions to send requests to remote endpoints. +package transport + +import ( + "fmt" + "net/http" + + "github.com/docker/go-connections/sockets" +) + +// apiTransport holds information about the http transport to connect with the API. +type apiTransport struct { + *http.Client + *tlsInfo + transport *http.Transport +} + +// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client. +// It uses Docker's default http transport configuration if the client is nil. +// It does not modify the client's transport if it's not nil. +func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) { + var transport *http.Transport + + if client != nil { + tr, ok := client.Transport.(*http.Transport) + if !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) + } + transport = tr + } else { + transport = defaultTransport(proto, addr) + client = &http.Client{ + Transport: transport, + } + } + + return &apiTransport{ + Client: client, + tlsInfo: &tlsInfo{transport.TLSClientConfig}, + transport: transport, + }, nil +} + +// CancelRequest stops a request execution. +func (a *apiTransport) CancelRequest(req *http.Request) { + a.transport.CancelRequest(req) +} + +// defaultTransport creates a new http.Transport with Docker's +// default transport configuration.
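+// sockets.ConfigureTransport (from docker/go-connections) tunes the transport for the given proto, e.g. unix-socket vs. tcp dialing.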
+func defaultTransport(proto, addr string) *http.Transport { + tr := new(http.Transport) + sockets.ConfigureTransport(tr, proto, addr) + return tr +} + +var _ Client = &apiTransport{} diff --git a/vendor/github.com/docker/engine-api/client/version.go b/vendor/github.com/docker/engine-api/client/version.go new file mode 100644 index 00000000..e037551a --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns version information about the docker server. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/vendor/github.com/docker/engine-api/client/volume_create.go b/vendor/github.com/docker/engine-api/client/volume_create.go new file mode 100644 index 00000000..cc1e1c17 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/volume_create.go @@ -0,0 +1,20 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/github.com/docker/engine-api/client/volume_inspect.go b/vendor/github.com/docker/engine-api/client/volume_inspect.go new file mode 100644 index 00000000..4bf4a7b0 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/volume_inspect.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + "net/http" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, volumeNotFoundError{volumeID} + } + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/github.com/docker/engine-api/client/volume_list.go b/vendor/github.com/docker/engine-api/client/volume_list.go new file mode 100644 index 00000000..bb4c40d5 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host.
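+// A usage sketch (illustrative; the "dangling" filter key is borrowed from the docker CLI, not defined here): +// +//	f := filters.NewArgs() +//	f.Add("dangling", "true") +//	list, err := cli.VolumeList(ctx, f)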
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { + var volumes types.VolumesListResponse + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParam(filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/vendor/github.com/docker/engine-api/client/volume_remove.go b/vendor/github.com/docker/engine-api/client/volume_remove.go new file mode 100644 index 00000000..0dce24c7 --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/volume_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error { + resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/engine-api/types/auth.go new file mode 100644 index 00000000..056af6b8 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go new file mode 100644 index 00000000..458a9c96 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds a device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds a device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go new file mode 100644 index 00000000..f09ad02e --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/client.go @@ -0,0 +1,236 @@ +package types + +import ( + "bufio" + "io" + "net" + + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "github.com/docker/go-units" +) + +// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct { + ContainerID string + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + ContainerID string + RepositoryName string + Tag string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filter filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ContainerID string + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + ContainerID string + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + ContainerID string + Path string + Content io.Reader + AllowOverwriteDirWithFile bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface implemented by connections +// that can close the writing side of the stream. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes the write side of the hijacked connection. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + BuildArgs map[string]string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + Parent string // Parent is the name of the image to pull + Tag string // Tag is the name to tag this image with + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct { + Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) + SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) + RepositoryName string // RepositoryName is the name of the repository to import this image into + Message string // Message is the message to tag the image with + Tag string // Tag is the name to tag this image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + MatchName string + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + ImageID string // ImageID is the name of the image to pull + Tag string // Tag is the name of the tag to be pulled + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + ImageID string + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + Term string + RegistryAuth string +} + +// ImageTagOptions holds parameters to tag an image +type ImageTagOptions struct { + ImageID string + RepositoryName string + Tag string + Force bool +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + ID string + Height int + Width int +} + +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +} diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go new file mode 100644 index 00000000..6874a037 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/configs.go @@ -0,0 +1,54 @@ +package types + +import ( + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container.
+type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + Container string // Name of the container (to execute in) + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Cmd []string // Execution commands and args +} diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go new file mode 100644 index 00000000..b8747a50 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/container/config.go @@ -0,0 +1,38 @@ +package container + +import ( + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports + PublishService string `json:",omitempty"` // Name of the network service exposed by the container + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects. + Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (eg.
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container +} diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go new file mode 100644 index 00000000..a1b503f8 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/container/host_config.go @@ -0,0 +1,299 @@ +package container + +import ( + "strings" + + "github.com/docker/engine-api/types/blkiodev" + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// NetworkMode represents the container network stack. +type NetworkMode string + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container.
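+// For example (illustrative, name made up), "container:console" places this container in the cgroup of the container named "console".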
+type CgroupSpec string + +// IsContainer indicates whether the container is placed in another container's cgroup. +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid stack of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its private pid stack. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's pid stack. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the pid stack is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart if it exits with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy values to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs.
other containers) + Memory int64 // Memory limit (in bytes) + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Setting pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + BlkioIOps uint64 // Maximum IOps for the container system drive + BlkioBps uint64 // Maximum Bytes per second for the container system drive + SandboxSize uint64 // System drive will be expanded to at least this size (in bytes) +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig is the non-portable Config structure of a container. +// Here, "non-portable" means "dependent on the host we are running on". +// Portable information *should* appear in Config.
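+// A minimal sketch (illustrative only; the bind path and values are made up): +// +//	hc := &container.HostConfig{ +//		Binds:       []string{"/var/run:/var/run"}, +//		NetworkMode: "host", +//		Privileged:  true, +//	}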
+type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other containers + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS servers to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed ports for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only mode + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + + // Applicable to Windows + ConsoleSize [2]int // Initial console size + Isolation Isolation // Isolation technology of the container (eg default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources +} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go new file mode 100644 index 00000000..4171059a --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package container + +import "strings" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// ConnectedContainer returns the id of the container whose network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} + +// UserDefined returns the name of the user-created network, or an empty string. +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go new file mode 100644 index 00000000..5726a77e --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go @@ -0,0 +1,87 @@ +package container + +import ( + "strings" +) + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsContainer indicates whether container uses a container network stack. +// Returns false as windows doesn't support this mode +func (n NetworkMode) IsContainer() bool { + return false +} + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// ConnectedContainer returns the id of the container whose network this container is connected to.
+// Returns blank string on windows +func (n NetworkMode) ConnectedContainer() string { + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} + +// UserDefined returns the name of the user-created network, or an empty string. +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/engine-api/types/events/events.go b/vendor/github.com/docker/engine-api/types/events/events.go new file mode 100644 index 00000000..c5987aaf --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/events/events.go @@ -0,0 +1,38 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go new file mode 100644 index 00000000..9c80b1ed --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/filters/parse.go @@ -0,0 +1,257 @@ +// Package filters provides helper functions to parse and handle command line +// filters, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" +) + +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values.
diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go
new file mode 100644
index 00000000..9c80b1ed
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/filters/parse.go
@@ -0,0 +1,257 @@
+// Package filters provides helper functions to parse and handle command line
+// filters, used for example in docker ps or docker images commands.
+package filters
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+	fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+	return Args{fields: map[string]map[string]bool{}}
+}
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+//   `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// If prev map is provided, then it is appended to, and returned. By default a new
+// map is created.
+func ParseFlag(arg string, prev Args) (Args, error) {
+	filters := prev
+	if len(arg) == 0 {
+		return filters, nil
+	}
+
+	if !strings.Contains(arg, "=") {
+		return filters, ErrBadFormat
+	}
+
+	f := strings.SplitN(arg, "=", 2)
+
+	name := strings.ToLower(strings.TrimSpace(f[0]))
+	value := strings.TrimSpace(f[1])
+
+	filters.Add(name, value)
+
+	return filters, nil
+}
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
+
+// ToParam packs the Args into a string for easy transport from client to server.
+func ToParam(a Args) (string, error) {
+	// this way we don't URL encode {}, just empty space
+	if a.Len() == 0 {
+		return "", nil
+	}
+
+	buf, err := json.Marshal(a.fields)
+	if err != nil {
+		return "", err
+	}
+	return string(buf), nil
+}
+
+// FromParam unpacks the filter Args.
+func FromParam(p string) (Args, error) {
+	if len(p) == 0 {
+		return NewArgs(), nil
+	}
+
+	r := strings.NewReader(p)
+	d := json.NewDecoder(r)
+
+	m := map[string]map[string]bool{}
+	if err := d.Decode(&m); err != nil {
+		r.Seek(0, 0)
+
+		// Allow parsing old arguments in slice format.
+		// Because other libraries might be sending them in this format.
+		deprecated := map[string][]string{}
+		if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
+			m = deprecatedArgs(deprecated)
+		} else {
+			return NewArgs(), err
+		}
+	}
+	return Args{m}, nil
+}
+
+// Get returns the list of values associated with a field.
+// It returns a slice of strings to keep backwards compatibility with old code.
+func (filters Args) Get(field string) []string {
+	values := filters.fields[field]
+	if values == nil {
+		return make([]string, 0)
+	}
+	slice := make([]string, 0, len(values))
+	for key := range values {
+		slice = append(slice, key)
+	}
+	return slice
+}
+
+// Add adds a new value to a filter field.
+func (filters Args) Add(name, value string) {
+	if _, ok := filters.fields[name]; ok {
+		filters.fields[name][value] = true
+	} else {
+		filters.fields[name] = map[string]bool{value: true}
+	}
+}
+
+// Del removes a value from a filter field.
+func (filters Args) Del(name, value string) {
+	if _, ok := filters.fields[name]; ok {
+		delete(filters.fields[name], value)
+	}
+}
+
+// Len returns the number of fields in the arguments.
+func (filters Args) Len() int {
+	return len(filters.fields)
+}
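A minimal usage sketch for the Args helpers above, mirroring what the CLI builds for `docker ps -f` (illustrative only; the import path assumes this vendor tree):

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

func main() {
	// Build the same structure that
	// `docker ps -f 'label=env=prod' -f 'status=running'` would produce.
	args := filters.NewArgs()
	args.Add("label", "env=prod")
	args.Add("status", "running")

	// ToParam serializes the filters for the query string of an API call.
	param, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(param)              // {"label":{"env=prod":true},"status":{"running":true}} (key order may vary)
	fmt.Println(args.Get("status")) // [running]
}
```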
+// MatchKVList returns true if the values for the specified field match the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
+// field is 'label' and sources are {'label1': '1', 'label2': '1'}
+// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+	fieldValues := filters.fields[field]
+
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+
+	if sources == nil || len(sources) == 0 {
+		return false
+	}
+
+	for name2match := range fieldValues {
+		testKV := strings.SplitN(name2match, "=", 2)
+
+		v, ok := sources[testKV[0]]
+		if !ok {
+			return false
+		}
+		if len(testKV) == 2 && testKV[1] != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Match returns true if the values for the specified field match the source string
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+	if filters.ExactMatch(field, source) {
+		return true
+	}
+
+	fieldValues := filters.fields[field]
+	for name2match := range fieldValues {
+		match, err := regexp.MatchString(name2match, source)
+		if err != nil {
+			continue
+		}
+		if match {
+			return true
+		}
+	}
+	return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+	fieldValues, ok := filters.fields[field]
+	// do not filter if there is no filter set or cannot determine filter
+	if !ok || len(fieldValues) == 0 {
+		return true
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	if fieldValues[source] {
+		return true
+	}
+	return false
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+	if filters.ExactMatch(field, source) {
+		return true
+	}
+
+	fieldValues := filters.fields[field]
+	for prefix := range fieldValues {
+		if strings.HasPrefix(source, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Include returns true if the name of the field to filter is in the filters.
+func (filters Args) Include(field string) bool {
+	_, ok := filters.fields[field]
+	return ok
+}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
+func (filters Args) Validate(accepted map[string]bool) error {
+	for name := range filters.fields {
+		if !accepted[name] {
+			return fmt.Errorf("Invalid filter '%s'", name)
+		}
+	}
+	return nil
+}
+
+// WalkValues iterates over the list of filtered values for a field.
+// It stops the iteration if it finds an error and it returns that error.
+func (filters Args) WalkValues(field string, op func(value string) error) error {
+	if _, ok := filters.fields[field]; !ok {
+		return nil
+	}
+	for v := range filters.fields[field] {
+		if err := op(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+	m := map[string]map[string]bool{}
+	for k, v := range d {
+		values := map[string]bool{}
+		for _, vv := range v {
+			values[vv] = true
+		}
+		m[k] = values
+	}
+	return m
+}
diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go
new file mode 100644
index 00000000..bce60f5e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/network/network.go
@@ -0,0 +1,52 @@
+package network
+
+// Address represents an IP address
+type Address struct {
+	Addr      string
+	PrefixLen int
+}
+
+// IPAM represents IP Address Management
+type IPAM struct {
+	Driver  string
+	Options map[string]string // Per-network IPAM driver options
+	Config  []IPAMConfig
+}
+
+// IPAMConfig represents IPAM configurations
+type IPAMConfig struct {
+	Subnet     string            `json:",omitempty"`
+	IPRange    string            `json:",omitempty"`
+	Gateway    string            `json:",omitempty"`
+	AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// EndpointIPAMConfig represents IPAM configurations for the endpoint
+type EndpointIPAMConfig struct {
+	IPv4Address string `json:",omitempty"`
+	IPv6Address string `json:",omitempty"`
+}
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+	// Configurations
+	IPAMConfig *EndpointIPAMConfig
+	Links      []string
+	Aliases    []string
+	// Operational data
+	NetworkID           string
+	EndpointID          string
+	Gateway             string
+	IPAddress           string
+	IPPrefixLen         int
+	IPv6Gateway         string
+	GlobalIPv6Address   string
+	GlobalIPv6PrefixLen int
+	MacAddress          string
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+	EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
+}
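An illustrative sketch of building a NetworkingConfig, roughly the shape that accompanies a container-create request for one user-defined network (not from upstream; the import path assumes this vendor tree):

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/network"
)

func main() {
	// One user-defined network with a fixed IPv4 address and an alias.
	cfg := network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"app-net": {
				IPAMConfig: &network.EndpointIPAMConfig{IPv4Address: "172.20.0.10"},
				Aliases:    []string{"db"},
			},
		},
	}
	fmt.Println(cfg.EndpointsConfig["app-net"].IPAMConfig.IPv4Address) // 172.20.0.10
}
```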
diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go
new file mode 100644
index 00000000..4fcf986e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/registry/registry.go
@@ -0,0 +1,101 @@
+package registry
+
+import (
+	"encoding/json"
+	"net"
+)
+
+// ServiceConfig stores daemon registry services configuration.
+type ServiceConfig struct {
+	InsecureRegistryCIDRs []*NetIPNet           `json:"InsecureRegistryCIDRs"`
+	IndexConfigs          map[string]*IndexInfo `json:"IndexConfigs"`
+	Mirrors               []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled and
+// unmarshalled to JSON
+type NetIPNet net.IPNet
+
+// MarshalJSON returns the JSON representation of the IPNet
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+	return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+	var ipnetStr string
+	if err = json.Unmarshal(b, &ipnetStr); err == nil {
+		var cidr *net.IPNet
+		if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+			*ipnet = NetIPNet(*cidr)
+		}
+	}
+	return
+}
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+// {
+//   "Index" : {
+//     "Name" : "docker.io",
+//     "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+//     "Secure" : true,
+//     "Official" : true,
+//   },
+//   "RemoteName" : "library/debian",
+//   "LocalName" : "debian",
+//   "CanonicalName" : "docker.io/debian"
+//   "Official" : true,
+// }
+//
+// {
+//   "Index" : {
+//     "Name" : "127.0.0.1:5000",
+//     "Mirrors" : [],
+//     "Secure" : false,
+//     "Official" : false,
+//   },
+//   "RemoteName" : "user/repo",
+//   "LocalName" : "127.0.0.1:5000/user/repo",
+//   "CanonicalName" : "127.0.0.1:5000/user/repo",
+//   "Official" : false,
+// }
+type IndexInfo struct {
+	// Name is the name of the registry, such as "docker.io"
+	Name string
+	// Mirrors is a list of mirrors, expressed as URIs
+	Mirrors []string
+	// Secure is set to false if the registry is part of the list of
+	// insecure registries. Insecure registries accept HTTP and/or accept
+	// HTTPS with certificates from unknown CAs.
+	Secure bool
+	// Official indicates whether this is an official registry
+	Official bool
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+	// StarCount indicates the number of stars this repository has
+	StarCount int `json:"star_count"`
+	// IsOfficial indicates whether the result is an official repository or not
+	IsOfficial bool `json:"is_official"`
+	// Name is the name of the repository
+	Name string `json:"name"`
+	// IsTrusted indicates whether the result is trusted
+	IsTrusted bool `json:"is_trusted"`
+	// IsAutomated indicates whether the result is automated
+	IsAutomated bool `json:"is_automated"`
+	// Description is a textual description of the repository
+	Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+	// Query contains the query string that generated the search results
+	Query string `json:"query"`
+	// NumResults indicates the number of results the query returned
+	NumResults int `json:"num_results"`
+	// Results is a slice containing the actual results for the search
+	Results []SearchResult `json:"results"`
+}
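A small sketch of the NetIPNet round-trip above: on the wire a CIDR travels as a plain string, not as net.IPNet's struct form (illustrative only; the import path assumes this vendor tree):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types/registry"
)

func main() {
	// Insecure-registry CIDRs arrive as strings in the daemon's /info payload.
	in := []byte(`{"InsecureRegistryCIDRs":["192.168.0.0/16"],"IndexConfigs":{}}`)

	var cfg registry.ServiceConfig
	if err := json.Unmarshal(in, &cfg); err != nil {
		panic(err)
	}

	// Marshalling goes back through NetIPNet.MarshalJSON to the string form.
	out, _ := json.Marshal(cfg.InsecureRegistryCIDRs[0])
	fmt.Println(string(out)) // "192.168.0.0/16"
}
```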
diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go
new file mode 100644
index 00000000..e0305a9e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/seccomp.go
@@ -0,0 +1,68 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+	DefaultAction Action     `json:"defaultAction"`
+	Architectures []Arch     `json:"architectures"`
+	Syscalls      []*Syscall `json:"syscalls"`
+}
+
+// Arch used for additional architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+	ArchX86         Arch = "SCMP_ARCH_X86"
+	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
+	ArchX32         Arch = "SCMP_ARCH_X32"
+	ArchARM         Arch = "SCMP_ARCH_ARM"
+	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
+	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
+	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
+	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
+	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
+	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
+	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+	ActKill  Action = "SCMP_ACT_KILL"
+	ActTrap  Action = "SCMP_ACT_TRAP"
+	ActErrno Action = "SCMP_ACT_ERRNO"
+	ActTrace Action = "SCMP_ACT_TRACE"
+	ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+	OpNotEqual     Operator = "SCMP_CMP_NE"
+	OpLessThan     Operator = "SCMP_CMP_LT"
+	OpLessEqual    Operator = "SCMP_CMP_LE"
+	OpEqualTo      Operator = "SCMP_CMP_EQ"
+	OpGreaterEqual Operator = "SCMP_CMP_GE"
+	OpGreaterThan  Operator = "SCMP_CMP_GT"
+	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+	Index    uint     `json:"index"`
+	Value    uint64   `json:"value"`
+	ValueTwo uint64   `json:"valueTwo"`
+	Op       Operator `json:"op"`
+}
+
+// Syscall is used to match a syscall in Seccomp
+type Syscall struct {
+	Name   string `json:"name"`
+	Action Action `json:"action"`
+	Args   []*Arg `json:"args"`
+}
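An illustrative sketch of composing a tiny deny-by-default profile with these types, roughly the JSON shape consumed via `docker run --security-opt seccomp=<file>` (not from upstream; the import path assumes this vendor tree):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	// Deny everything with ENOSYS-style errors, then allow exit syscalls only.
	profile := types.Seccomp{
		DefaultAction: types.ActErrno,
		Architectures: []types.Arch{types.ArchX86_64},
		Syscalls: []*types.Syscall{
			{Name: "exit", Action: types.ActAllow},
			{Name: "exit_group", Action: types.ActAllow},
		},
	}
	out, _ := json.MarshalIndent(profile, "", "  ")
	fmt.Println(string(out))
}
```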
diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go
new file mode 100644
index 00000000..55081ae4
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/stats.go
@@ -0,0 +1,112 @@
+// Package types is used for API stability in the types and response to the
+// consumers of the API stats endpoint.
+package types
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container
+type ThrottlingData struct {
+	// Number of periods with throttling active
+	Periods uint64 `json:"periods"`
+	// Number of periods when the container hit its throttling limit.
+	ThrottledPeriods uint64 `json:"throttled_periods"`
+	// Aggregate time the container was throttled for in nanoseconds.
+	ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds.
+	TotalUsage uint64 `json:"total_usage"`
+	// Total CPU time consumed per core.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage"`
+	// Time spent by tasks of the cgroup in kernel mode.
+	// Units: nanoseconds.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+	// Time spent by tasks of the cgroup in user mode.
+	// Units: nanoseconds.
+	UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of container
+type CPUStats struct {
+	CPUUsage       CPUUsage       `json:"cpu_usage"`
+	SystemUsage    uint64         `json:"system_cpu_usage"`
+	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception
+type MemoryStats struct {
+	// current res_counter usage for memory
+	Usage uint64 `json:"usage"`
+	// maximum usage ever recorded.
+	MaxUsage uint64 `json:"max_usage"`
+	// TODO(vishh): Export these as stronger types.
+	// all the stats exported via memory.stat.
+	Stats map[string]uint64 `json:"stats"`
+	// number of times memory usage hits limits.
+	Failcnt uint64 `json:"failcnt"`
+	Limit   uint64 `json:"limit"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// TODO Windows: This can be factored out
+type BlkioStatEntry struct {
+	Major uint64 `json:"major"`
+	Minor uint64 `json:"minor"`
+	Op    string `json:"op"`
+	Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write
+// TODO Windows: This can be factored out
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
+	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
+	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
+	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
+	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
+	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
+	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// NetworkStats aggregates all network stats of one container
+// TODO Windows: This will require refactoring
+type NetworkStats struct {
+	RxBytes   uint64 `json:"rx_bytes"`
+	RxPackets uint64 `json:"rx_packets"`
+	RxErrors  uint64 `json:"rx_errors"`
+	RxDropped uint64 `json:"rx_dropped"`
+	TxBytes   uint64 `json:"tx_bytes"`
+	TxPackets uint64 `json:"tx_packets"`
+	TxErrors  uint64 `json:"tx_errors"`
+	TxDropped uint64 `json:"tx_dropped"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+	// Current is the number of pids in the cgroup
+	Current uint64 `json:"current,omitempty"`
+}
+
+// Stats aggregates all types of stats of one container
+type Stats struct {
+	Read        time.Time   `json:"read"`
+	PreCPUStats CPUStats    `json:"precpu_stats,omitempty"`
+	CPUStats    CPUStats    `json:"cpu_stats,omitempty"`
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
+	PidsStats   PidsStats   `json:"pids_stats,omitempty"`
+}
+
+// StatsJSON is Stats with the per-interface Networks map added (API version 1.21 and up)
+type StatsJSON struct {
+	Stats
+
+	// Networks request version >=1.21
+	Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go
new file mode 100644
index 00000000..bad493fb
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/docker/engine-api/types/time/timestamp.go b/vendor/github.com/docker/engine-api/types/time/timestamp.go new file mode 100644 index 00000000..d3695ba7 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/time/timestamp.go @@ -0,0 +1,124 @@ +package time + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
+func GetTimestamp(value string, reference time.Time) (string, error) {
+	if d, err := time.ParseDuration(value); value != "0" && err == nil {
+		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+	}
+
+	var format string
+	var parseInLocation bool
+
+	// if the string has a Z or a + or three dashes, use time.Parse; otherwise use time.ParseInLocation
+	parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+	if strings.Contains(value, ".") {
+		if parseInLocation {
+			format = rFC3339NanoLocal
+		} else {
+			format = time.RFC3339Nano
+		}
+	} else if strings.Contains(value, "T") {
+		// we want the number of colons in the T portion of the timestamp
+		tcolons := strings.Count(value, ":")
+		// if parseInLocation is off and we have a +/- zone offset (not Z) then
+		// there will be an extra colon in the input for the tz offset; subtract that
+		// colon from the tcolons count
+		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+			tcolons--
+		}
+		if parseInLocation {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15"
+			case 1:
+				format = "2006-01-02T15:04"
+			default:
+				format = rFC3339Local
+			}
+		} else {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15Z07:00"
+			case 1:
+				format = "2006-01-02T15:04Z07:00"
+			default:
+				format = time.RFC3339
+			}
+		}
+	} else if parseInLocation {
+		format = dateLocal
+	} else {
+		format = dateWithZone
+	}
+
+	var t time.Time
+	var err error
+
+	if parseInLocation {
+		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+	} else {
+		t, err = time.Parse(format, value)
+	}
+
+	if err != nil {
+		// if there is a `-` then it's an RFC3339-like timestamp, otherwise assume a Unix timestamp
+		if strings.Contains(value, "-") {
+			return "", err // was probably an RFC3339-like timestamp but the parser failed with an error
+		}
+		return value, nil // Unix timestamp in-and-out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+	}
+
+	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
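A small sketch of GetTimestamp's two main behaviours, durations relative to a reference time and RFC3339 dates (illustrative only; the vendored package is named `time`, so it is aliased to avoid clashing with the standard library):

```go
package main

import (
	"fmt"
	stdtime "time"

	dockertime "github.com/docker/engine-api/types/time"
)

func main() {
	now := stdtime.Date(2016, 6, 1, 12, 0, 0, 0, stdtime.UTC)

	// A duration is interpreted relative to the reference time:
	// "1h" becomes the Unix time for one hour before now.
	ts, err := dockertime.GetTimestamp("1h", now)
	fmt.Println(ts, err) // 1464778800 <nil>

	// An RFC3339 timestamp parses to "seconds.nanoseconds".
	ts, err = dockertime.GetTimestamp("2016-06-01T00:00:00Z", now)
	fmt.Println(ts, err) // 1464739200.000000000 <nil>
}
```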
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
+// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
+// if the incoming nanosecond portion is longer or shorter than 9 digits it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+// if err == nil since := time.Unix(seconds, nanoseconds)
+// returns seconds as def(aultSeconds) if value == ""
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+	if value == "" {
+		return def, 0, nil
+	}
+	sa := strings.SplitN(value, ".", 2)
+	s, err := strconv.ParseInt(sa[0], 10, 64)
+	if err != nil {
+		return s, 0, err
+	}
+	if len(sa) != 2 {
+		return s, 0, nil
+	}
+	n, err := strconv.ParseInt(sa[1], 10, 64)
+	if err != nil {
+		return s, n, err
+	}
+	// should already be in nanoseconds but just in case convert n to nanoseconds
+	n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+	return s, n, nil
+}
diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go
new file mode 100644
index 00000000..18cfbdda
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/types.go
@@ -0,0 +1,453 @@
+package types
+
+import (
+	"os"
+	"time"
+
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/network"
+	"github.com/docker/engine-api/types/registry"
+	"github.com/docker/go-connections/nat"
+)
+
+// ContainerCreateResponse contains the information returned to a client on the
+// creation of a new container.
+type ContainerCreateResponse struct {
+	// ID is the ID of the created container.
+	ID string `json:"Id"`
+
+	// Warnings are any warnings encountered during the creation of the container.
+	Warnings []string `json:"Warnings"`
+}
+
+// ContainerExecCreateResponse contains response of Remote API:
+// POST "/containers/{name:.*}/exec"
+type ContainerExecCreateResponse struct {
+	// ID is the exec ID.
+	ID string `json:"Id"`
+}
+
+// ContainerUpdateResponse contains response of Remote API:
+// POST /containers/{name:.*}/update
+type ContainerUpdateResponse struct {
+	// Warnings are any warnings encountered during the updating of the container.
+	Warnings []string `json:"Warnings"`
+}
+
+// AuthResponse contains response of Remote API:
+// POST "/auth"
+type AuthResponse struct {
+	// Status is the authentication status
+	Status string `json:"Status"`
+
+	// IdentityToken is an opaque token used for authenticating
+	// a user after a successful login.
+ IdentityToken string `json:"IdentityToken,omitempty"` +} + +// ContainerWaitResponse contains response of Remote API: +// POST "/containers/"+containerID+"/wait" +type ContainerWaitResponse struct { + // StatusCode is the status code of the wait job + StatusCode int `json:"StatusCode"` +} + +// ContainerCommitResponse contains response of Remote API: +// POST "/commit?container="+containerID +type ContainerCommitResponse struct { + ID string `json:"Id"` +} + +// ContainerChange contains response of Remote API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Remote API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Remote API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// Image contains response of Remote API: +// GET "/images/json" +type Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Remote API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Port stores open ports info of container +// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} +type Port struct { + IP string `json:",omitempty"` + PrivatePort int + PublicPort int `json:",omitempty"` + Type string +} + +// Container contains response of Remote API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Remote API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerProcessList contains response of Remote API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Version contains response of Remote API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Info contains response of Remote API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. 
It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string +} + +// ContainerJSONBase contains response of Remote API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string + Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + IPAddress string + IPPrefixLen int + IPv6Gateway string + MacAddress string +} + +// MountPoint represents a mount point configuration inside the container. 
+type MountPoint struct { + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation string +} + +// Volume represents the configuration of a volume for the remote API +type Volume struct { + Name string // Name is the name of the volume + Driver string // Driver is the Driver name used to create the volume + Mountpoint string // Mountpoint is the location on disk of the volume + Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume + Labels map[string]string // Labels is metadata specific to the volume +} + +// VolumesListResponse contains the response for the remote API: +// GET "/volumes" +type VolumesListResponse struct { + Volumes []*Volume // Volumes is the list of volumes being returned + Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers +} + +// VolumeCreateRequest contains the response for the remote API: +// POST "/volumes/create" +type VolumeCreateRequest struct { + Name string // Name is the requested name of the volume + Driver string // Driver is the name of the driver that should be used to create the volume + DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. + Labels map[string]string // Labels holds metadata specific to the volume being created. +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string + ID string `json:"Id"` + Scope string + Driver string + EnableIPv6 bool + IPAM network.IPAM + Internal bool + Containers map[string]EndpointResource + Options map[string]string + Labels map[string]string +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + Name string + CheckDuplicate bool + Driver string + EnableIPv6 bool + IPAM network.IPAM + Internal bool + Options map[string]string + Labels map[string]string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md new file mode 100644 index 00000000..76c516e6 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/versions/README.md @@ -0,0 +1,14 @@ +## Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. 
Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.

### Package name conventions

The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:

1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
2. We cannot use `_` because golint complains about it. The code would actually be valid, but it arguably looks weirder: `v1_20.CallFunction`.

For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go b/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go
new file mode 100644
index 00000000..4ed43358
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go
@@ -0,0 +1,35 @@
+// Package v1p19 provides specific API types for the API version 1, patch 19.
+package v1p19
+
+import (
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/versions/v1p20"
+	"github.com/docker/go-connections/nat"
+)
+
+// ContainerJSON is a backcompatibility struct for APIs prior to 1.20.
+// Note this is not used by the Windows daemon.
+type ContainerJSON struct {
+	*types.ContainerJSONBase
+	Volumes         map[string]string
+	VolumesRW       map[string]bool
+	Config          *ContainerConfig
+	NetworkSettings *v1p20.NetworkSettings
+}
+
+// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
+type ContainerConfig struct {
+	*container.Config
+
+	MacAddress      string
+	NetworkDisabled bool
+	ExposedPorts    map[nat.Port]struct{}
+
+	// backward compatibility, they now live in HostConfig
+	VolumeDriver string
+	Memory       int64
+	MemorySwap   int64
+	CPUShares    int64  `json:"CpuShares"`
+	CPUSet       string `json:"Cpuset"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go b/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go
new file mode 100644
index 00000000..ed800061
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go
@@ -0,0 +1,40 @@
+// Package v1p20 provides specific API types for the API version 1, patch 20.
+package v1p20 + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for API prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/docker/go-connections/CONTRIBUTING.md b/vendor/github.com/docker/go-connections/CONTRIBUTING.md new file mode 100644 index 00000000..926dcc93 --- /dev/null +++ b/vendor/github.com/docker/go-connections/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing to Docker + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. 
diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/go-connections/MAINTAINERS b/vendor/github.com/docker/go-connections/MAINTAINERS new file mode 100644 index 00000000..477be8b2 --- /dev/null +++ b/vendor/github.com/docker/go-connections/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md new file mode 100644 index 00000000..d257e44f --- /dev/null +++ b/vendor/github.com/docker/go-connections/README.md @@ -0,0 +1,13 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections) + +# Introduction + +go-connections provides common package to work with network connections. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation. + +## License + +go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-connections/circle.yml b/vendor/github.com/docker/go-connections/circle.yml new file mode 100644 index 00000000..8a82ee82 --- /dev/null +++ b/vendor/github.com/docker/go-connections/circle.yml @@ -0,0 +1,14 @@ +dependencies: + pre: + # setup ipv6 + - sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 net.ipv6.conf.default.disable_ipv6=0 net.ipv6.conf.all.disable_ipv6=0 + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/docker/pkg/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go similarity index 90% rename from vendor/github.com/docker/docker/pkg/nat/nat.go rename to vendor/github.com/docker/go-connections/nat/nat.go index 6595feb0..3d469165 100644 --- a/vendor/github.com/docker/docker/pkg/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -1,15 +1,11 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. package nat -// nat is a convenience package for docker's manipulation of strings describing -// network ports. - import ( "fmt" "net" "strconv" "strings" - - "github.com/docker/docker/pkg/parsers" ) const ( @@ -39,7 +35,7 @@ func NewPort(proto, port string) (Port, error) { // Check for parsing issues on "port" now so we can avoid having // to check it later on. 
- portStartInt, portEndInt, err := ParsePortRange(port) + portStartInt, portEndInt, err := ParsePortRangeToInt(port) if err != nil { return "", err } @@ -62,12 +58,12 @@ func ParsePort(rawPort string) (int, error) { return int(port), nil } -// ParsePortRange parses the port range string and returns start/end ints -func ParsePortRange(rawPort string) (int, int, error) { +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { if len(rawPort) == 0 { return 0, 0, nil } - start, end, err := parsers.ParsePortRange(rawPort) + start, end, err := ParsePortRange(rawPort) if err != nil { return 0, 0, err } @@ -101,7 +97,7 @@ func (p Port) Int() int { // Range returns the start/end port numbers of a Port range as ints func (p Port) Range() (int, int, error) { - return ParsePortRange(p.Port()) + return ParsePortRangeToInt(p.Port()) } // SplitProtoPort splits a port in the format of proto/port @@ -150,7 +146,7 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, rawPort = fmt.Sprintf(":%s", rawPort) } - parts, err := parsers.PartParser(portSpecTemplate, rawPort) + parts, err := PartParser(portSpecTemplate, rawPort) if err != nil { return nil, nil, err } @@ -168,14 +164,14 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, return nil, nil, fmt.Errorf("No port specified: %s", rawPort) } - startPort, endPort, err := parsers.ParsePortRange(containerPort) + startPort, endPort, err := ParsePortRange(containerPort) if err != nil { return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) } var startHostPort, endHostPort uint64 = 0, 0 if len(hostPort) > 0 { - startHostPort, endHostPort, err = parsers.ParsePortRange(hostPort) + startHostPort, endHostPort, err = ParsePortRange(hostPort) if err != nil { return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) } diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 00000000..87205020 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,56 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/docker/pkg/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go similarity index 96% rename from vendor/github.com/docker/docker/pkg/nat/sort.go rename to vendor/github.com/docker/go-connections/nat/sort.go index 1eb0fedd..ce950171 100644 --- a/vendor/github.com/docker/docker/pkg/nat/sort.go +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -3,8 +3,6 @@ package nat import ( "sort" "strings" - - "github.com/docker/docker/pkg/parsers" ) type portSorter struct { @@ -90,7 +88,7 @@ func SortPortMap(ports []Port, bindings PortMap) { } func toInt(s string) uint64 { - i, _, err := parsers.ParsePortRange(s) + i, _, err := ParsePortRange(s) if err != nil { i = 0 } diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 00000000..3395e402 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,89 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. 
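The new `parse.go` above brings `PartParser` and `ParsePortRange` into the `nat` package itself, removing the vendored dependency on `docker/docker/pkg/parsers`. A small sketch of both helpers (values illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// PartParser splits data against a colon-separated template.
	parts, err := nat.PartParser("ip:public:private", "192.168.0.1:80:8000")
	if err != nil {
		panic(err)
	}
	fmt.Println(parts["ip"], parts["public"], parts["private"]) // 192.168.0.1 80 8000

	// ParsePortRange accepts a single port or a "start-end" range.
	start, end, err := nat.ParsePortRange("8000-9000")
	fmt.Println(start, end, err) // 8000 9000 <nil>
}
```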
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} + +// timeoutError is used when there is a timeout with a connection +// this implements the net.Error interface +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 00000000..98e9a1dc --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 00000000..1739cecf --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,42 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. +func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + // No need for compression in local communications. 
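`InmemSocket` above implements `net.Listener` entirely in memory: `Dial` pushes the server half of a `net.Pipe` onto the channel that `Accept` drains, and hands the client half back to the caller. A minimal round trip, as a sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/sockets"
)

func main() {
	l := sockets.NewInmemSocket("test-addr", 1)
	defer l.Close()

	go func() {
		conn, _ := l.Accept() // receives the server side of the net.Pipe
		defer conn.Close()
		conn.Write([]byte("hello"))
	}()

	conn, err := l.Dial("inmem", "test-addr") // both arguments are ignored
	if err != nil {
		panic(err)
	}
	buf := make([]byte, 5)
	conn.Read(buf)
	fmt.Println(string(buf)) // hello
}
```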
+ tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + case "npipe": + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 00000000..b255ac9a --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package sockets + +import ( + "net" + "syscall" + "time" +) + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. +func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 00000000..1f3540b2 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,13 @@ +package sockets + +import ( + "net" + "time" + + "github.com/Microsoft/go-winio" +) + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 00000000..8a82727d --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// and the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. +func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 00000000..d1627349 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,80 @@ +// +build linux freebsd solaris + +package sockets + +import ( + "fmt" + "net" + "os" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/user" +) + +// NewUnixSocket creates a unix socket with the specified path and group. 
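`ConfigureTransport` dispatches on the protocol: local `unix` and `npipe` transports dial the socket directly with compression disabled, everything else goes through the environment-driven proxy dialer, and `DialPipe` is stubbed out with `EAFNOSUPPORT` off Windows. A sketch of wiring an `http.Client` to a Unix socket this way (socket path and endpoint are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// "unix" takes the local branch above: compression off, direct dial.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}

	// The URL host is a placeholder; the configured dialer ignores it
	// and connects to the socket instead.
	resp, err := client.Get("http://docker/_ping") // hypothetical endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```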
+func NewUnixSocket(path, group string) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := setSocketGroup(path, group); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} + +func setSocketGroup(path, group string) error { + if group == "" { + return nil + } + if err := changeGroup(path, group); err != nil { + if group != "docker" { + return err + } + logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) + } + return nil +} + +func changeGroup(path string, nameOrGid string) error { + gid, err := lookupGidByName(nameOrGid) + if err != nil { + return err + } + logrus.Debugf("%s group found. gid: %d", nameOrGid, gid) + return os.Chown(path, 0, gid) +} + +func lookupGidByName(nameOrGid string) (int, error) { + groupFile, err := user.GetGroupPath() + if err != nil { + return -1, err + } + groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { + return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid + }) + if err != nil { + return -1, err + } + if groups != nil && len(groups) > 0 { + return groups[0].Gid, nil + } + gid, err := strconv.Atoi(nameOrGid) + if err == nil { + logrus.Warnf("Could not find GID %d", gid) + return gid, nil + } + return -1, fmt.Errorf("Group %s not found", nameOrGid) +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/tlsconfig/config.go rename to vendor/github.com/docker/go-connections/tlsconfig/config.go index e3dfad1f..9378c358 100644 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -41,12 +41,6 @@ var acceptedCBCCiphers = []uint16{ tls.TLS_RSA_WITH_AES_128_CBC_SHA, } -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls // options struct but wants to use a commonly accepted set of TLS cipher suites, with // known weak algorithms removed. @@ -91,7 +85,7 @@ func certPool(caFile string) (*x509.CertPool, error) { func Client(options Options) (*tls.Config, error) { tlsConfig := ClientDefault tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify { + if !options.InsecureSkipVerify && options.CAFile != "" { CAs, err := certPool(options.CAFile) if err != nil { return nil, err @@ -99,7 +93,7 @@ func Client(options Options) (*tls.Config, error) { tlsConfig.RootCAs = CAs } - if options.CertFile != "" && options.KeyFile != "" { + if options.CertFile != "" || options.KeyFile != "" { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { return nil, fmt.Errorf("Could not load X509 key pair: %v. 
Make sure the key is not encrypted", err) diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 00000000..6b4c6a7c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 00000000..ee22df47 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 00000000..e2fb4051 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,13 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## License + +go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 00000000..9043b354 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . 
| tee /dev/stderr)" diff --git a/vendor/github.com/docker/docker/pkg/units/duration.go b/vendor/github.com/docker/go-units/duration.go similarity index 100% rename from vendor/github.com/docker/docker/pkg/units/duration.go rename to vendor/github.com/docker/go-units/duration.go diff --git a/vendor/github.com/docker/docker/pkg/units/size.go b/vendor/github.com/docker/go-units/size.go similarity index 100% rename from vendor/github.com/docker/docker/pkg/units/size.go rename to vendor/github.com/docker/go-units/size.go diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go similarity index 91% rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go rename to vendor/github.com/docker/go-units/ulimit.go index 8fb0d804..f0a7be29 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -1,6 +1,4 @@ -// Package ulimit provides structure and helper function to parse and represent -// resource limits (Rlimit and Ulimit, its human friendly version). -package ulimit +package units import ( "fmt" @@ -64,8 +62,8 @@ var ulimitNameMapping = map[string]int{ "stack": rlimitStack, } -// Parse parses and returns a Ulimit from the specified string. -func Parse(val string) (*Ulimit, error) { +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { parts := strings.SplitN(val, "=", 2) if len(parts) != 2 { return nil, fmt.Errorf("invalid ulimit argument: %s", val) diff --git a/vendor/github.com/docker/libcompose/.dockerignore b/vendor/github.com/docker/libcompose/.dockerignore index 6ee5de4e..07b91a6a 100644 --- a/vendor/github.com/docker/libcompose/.dockerignore +++ b/vendor/github.com/docker/libcompose/.dockerignore @@ -1 +1,2 @@ bundles/ +**/*.test diff --git a/vendor/github.com/docker/libcompose/.gitignore b/vendor/github.com/docker/libcompose/.gitignore index 81f42e87..bf76d4c9 100644 --- a/vendor/github.com/docker/libcompose/.gitignore +++ b/vendor/github.com/docker/libcompose/.gitignore @@ -6,3 +6,4 @@ *.swp bundles .gopath +.idea diff --git a/vendor/github.com/docker/libcompose/Dockerfile b/vendor/github.com/docker/libcompose/Dockerfile index 4faacae4..9c3de9ae 100644 --- a/vendor/github.com/docker/libcompose/Dockerfile +++ b/vendor/github.com/docker/libcompose/Dockerfile @@ -1,44 +1,56 @@ # This file describes the standard way to build libcompose, using docker -FROM golang:1.5.2 +FROM golang:1.6.2 -RUN apt-get update && apt-get install -y \ - iptables \ - build-essential \ - --no-install-recommends + +# virtualenv is necessary to run acceptance tests +RUN apt-get update && \ + apt-get install -y iptables build-essential --no-install-recommends && \ + apt-get install -y python-setuptools && \ + easy_install pip && pip install virtualenv # Install build dependencies -RUN go get github.com/mitchellh/gox -RUN go get github.com/aktau/github-release -RUN go get golang.org/x/tools/cmd/cover -RUN go get github.com/golang/lint/golint -RUN go get golang.org/x/tools/cmd/vet +RUN go get github.com/aktau/github-release && \ + go get golang.org/x/tools/cmd/cover && \ + go get github.com/golang/lint/golint -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 - -# Which 
docker version to test on -ENV DOCKER_VERSION 1.8.3 - -# enable GO15VENDOREXPERIMENT -ENV GO15VENDOREXPERIMENT 1 +# Which docker version to test on and what default one to use +ENV DOCKER_VERSIONS 1.9.1 1.10.3 1.11.0 +ENV DEFAULT_DOCKER_VERSION 1.10.3 # Download docker -RUN set -ex; \ - curl https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION} -o /usr/local/bin/docker-${DOCKER_VERSION}; \ - chmod +x /usr/local/bin/docker-${DOCKER_VERSION} +RUN set -e; \ + for v in $(echo ${DOCKER_VERSIONS} | cut -f1); do \ + if test "${v}" = "1.9.1" || test "${v}" = "1.10.3"; then \ + mkdir -p /usr/local/bin/docker-${v}/; \ + curl https://get.docker.com/builds/Linux/x86_64/docker-${v} -o /usr/local/bin/docker-${v}/docker; \ + chmod +x /usr/local/bin/docker-${v}/docker; \ + else \ + curl https://get.docker.com/builds/Linux/x86_64/docker-${v}.tgz -o docker-${v}.tgz; \ + tar xzf docker-${v}.tgz -C /usr/local/bin/; \ + mv /usr/local/bin/docker /usr/local/bin/docker-${v}; \ + rm docker-${v}.tgz; \ + fi \ + done # Set the default Docker to be run -RUN ln -s /usr/local/bin/docker-${DOCKER_VERSION} /usr/local/bin/docker +RUN ln -s /usr/local/bin/docker-${DEFAULT_DOCKER_VERSION} /usr/local/bin/docker + +WORKDIR /go/src/github.com/docker/libcompose + +# Compose COMMIT for acceptance test version, update that commit when +# you want to update the acceptance test version to support. +ENV COMPOSE_COMMIT e2cb7b0237085415ce48900309a61c73b5938520 +RUN virtualenv venv && \ + git clone https://github.com/docker/compose.git venv/compose && \ + cd venv/compose && \ + git checkout -q "$COMPOSE_COMMIT" && \ + ../bin/pip install \ + -r requirements.txt \ + -r requirements-dev.txt ENV COMPOSE_BINARY /go/src/github.com/docker/libcompose/libcompose-cli ENV USER root -WORKDIR /go/src/github.com/docker/libcompose - # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["script/dind"] diff --git a/vendor/github.com/docker/libcompose/Jenkinsfile b/vendor/github.com/docker/libcompose/Jenkinsfile new file mode 100644 index 00000000..3c89a644 --- /dev/null +++ b/vendor/github.com/docker/libcompose/Jenkinsfile @@ -0,0 +1,33 @@ + +wrappedNode(label: 'linux && x86_64') { + deleteDir() + checkout scm + def image + try { + stage "build image" + image = docker.build("dockerbuildbot/libcompose:${gitCommit()}") + + stage "validate" + makeTask(image, "validate") + + stage "test" + makeTask(image, "test", ["TESTVERBOSE=1", "DAEMON_VERSION=all"]) + + stage "build" + makeTask(image, "cross-binary") + } finally { + try { archive "bundles" } catch (Exception exc) {} + if (image) { sh "docker rmi ${image.id} ||:" } + } +} + +def makeTask(image, task, envVars=null) { + // could send in the full list of envVars for each call or provide default env vars like this: + withEnv((envVars ?: []) + ["LIBCOMPOSE_IMAGE=${image.id}"]) { // would need `def image` at top level of file instead of in the nested block + withChownWorkspace { + timeout(60) { + sh "make -e ${task}" + } + } + } +} diff --git a/vendor/github.com/docker/libcompose/Makefile b/vendor/github.com/docker/libcompose/Makefile index 5a333e8d..3ce74eec 100644 --- a/vendor/github.com/docker/libcompose/Makefile +++ b/vendor/github.com/docker/libcompose/Makefile @@ -15,44 +15,37 @@ LIBCOMPOSE_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.co GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) LIBCOMPOSE_IMAGE := libcompose-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_RUN_LIBCOMPOSE := docker run --rm -it --privileged 
$(LIBCOMPOSE_ENVS) $(LIBCOMPOSE_MOUNT) "$(LIBCOMPOSE_IMAGE)" +DAEMON_VERSION := $(if $(DAEMON_VERSION),$(DAEMON_VERSION),"default") +TTY := $(shell [ -t 0 ] && echo "-t") +DOCKER_RUN_LIBCOMPOSE := docker run --rm -i $(TTY) --privileged -e DAEMON_VERSION="$(DAEMON_VERSION)" $(LIBCOMPOSE_ENVS) $(LIBCOMPOSE_MOUNT) "$(LIBCOMPOSE_IMAGE)" default: binary -all: build +all: build ## validate all checks, build linux binary, run all tests\ncross build non-linux binaries $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh -binary: build +binary: build ## build the linux binary $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh binary -test: build - $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh binary test-unit test-integration +cross-binary: build ## cross build the non linux binaries (windows, darwin) + $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh cross-binary -test-unit: build +test: build ## run the unit, integration and acceptance tests + $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh binary test-unit test-integration test-acceptance + +test-unit: build ## run the unit tests $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh test-unit -test-integration: build +test-integration: build ## run the integration tests $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh binary test-integration -validate-dco: build - $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh validate-dco +test-acceptance: build ## run the acceptance tests + $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh binary test-acceptance -validate-gofmt: build - $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh validate-gofmt - -validate-lint: build - $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh validate-lint - -validate-vet: build - $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh validate-vet - -validate-git-marks: build - $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh validate-git-marks - -validate: build +validate: build ## validate DCO, git conflicts marks, gofmt, golint and go vet $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh validate-dco validate-git-marks validate-gofmt validate-lint validate-vet -shell: build +shell: build ## start a shell inside the build env $(DOCKER_RUN_LIBCOMPOSE) bash # Build the docker image, should be prior almost any other goals @@ -65,3 +58,5 @@ bundles: clean: $(DOCKER_RUN_LIBCOMPOSE) ./script/make.sh clean +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) diff --git a/vendor/github.com/docker/libcompose/README.md b/vendor/github.com/docker/libcompose/README.md index d1a8bacf..2b209365 100644 --- a/vendor/github.com/docker/libcompose/README.md +++ b/vendor/github.com/docker/libcompose/README.md @@ -1,7 +1,7 @@ # libcompose [![GoDoc](https://godoc.org/github.com/docker/libcompose?status.png)](https://godoc.org/github.com/docker/libcompose) -[![Jenkins Build Status](https://jenkins.dockerproject.org/view/Libcompose/job/Libcompose%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Libcompose/job/Libcompose%20Master/) +[![Build Status](https://jenkins.dockerproject.org/job/docker/job/libcompose/branch/master/badge/icon)](https://jenkins.dockerproject.org/job/docker/job/libcompose/branch/master/) A Go library for Docker Compose. It does everything the command-line tool does, but from within Go -- read Compose files, start them, scale them, etc. 
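Before the README's updated usage example below, one callout from the go-units move a few hunks up: `ulimit.Parse` is now `units.ParseUlimit`. A minimal sketch (import path per this vendor tree; value illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	// Format is name=soft[:hard], per the split on "=" in the hunk above.
	ul, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(ul.Name, ul.Soft, ul.Hard) // nofile 1024 2048
}
```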
@@ -13,23 +13,30 @@ package main import ( "log" + "golang.org/x/net/context" + "github.com/docker/libcompose/docker" "github.com/docker/libcompose/project" + "github.com/docker/libcompose/project/options" ) func main() { project, err := docker.NewProject(&docker.Context{ Context: project.Context{ - ComposeFile: "docker-compose.yml", - ProjectName: "my-compose", + ComposeFiles: []string{"docker-compose.yml"}, + ProjectName: "my-compose", }, - }) + }, nil) if err != nil { log.Fatal(err) } - project.Up() + err = project.Up(context.Background(), options.Up{}) + + if err != nil { + log.Fatal(err) + } } ``` @@ -67,8 +74,8 @@ libcompose-cli_linux-386* libcompose-cli_windows-386.exe* ### Building with `go` -- You need `go` v1.5 -- You need to set export `GO15VENDOREXPERIMENT=1` environment variable +- You need `go` v1.5 or greater +- If you are not using `go` v1.6, you need to set export `GO15VENDOREXPERIMENT=1` environment variable - If your working copy is not in your `GOPATH`, you need to set it accordingly. diff --git a/vendor/github.com/docker/libcompose/cli/app/app.go b/vendor/github.com/docker/libcompose/cli/app/app.go index b9cafcde..aed371dd 100644 --- a/vendor/github.com/docker/libcompose/cli/app/app.go +++ b/vendor/github.com/docker/libcompose/cli/app/app.go @@ -3,12 +3,17 @@ package app import ( "fmt" "os" + "os/signal" "strconv" "strings" + "syscall" + + "golang.org/x/net/context" "github.com/Sirupsen/logrus" "github.com/codegangsta/cli" "github.com/docker/libcompose/project" + "github.com/docker/libcompose/project/options" ) // ProjectAction is an adapter to allow the use of ordinary functions as libcompose actions. @@ -19,7 +24,7 @@ import ( // Usage: "List containers", // Action: app.WithProject(factory, app.ProjectPs), // } -type ProjectAction func(project *project.Project, c *cli.Context) +type ProjectAction func(project project.APIProject, c *cli.Context) error // BeforeApp is an action that is executed before any cli command. func BeforeApp(c *cli.Context) error { @@ -30,200 +35,268 @@ func BeforeApp(c *cli.Context) error { return nil } -// WithProject is an helper function to create a cli.Command action with a ProjectFactory. -func WithProject(factory ProjectFactory, action ProjectAction) func(context *cli.Context) { - return func(context *cli.Context) { +// WithProject is a helper function to create a cli.Command action with a ProjectFactory. +func WithProject(factory ProjectFactory, action ProjectAction) func(context *cli.Context) error { + return func(context *cli.Context) error { p, err := factory.Create(context) if err != nil { logrus.Fatalf("Failed to read project: %v", err) } - action(p, context) + return action(p, context) } } // ProjectPs lists the containers. -func ProjectPs(p *project.Project, c *cli.Context) { - allInfo := project.InfoSet{} +func ProjectPs(p project.APIProject, c *cli.Context) error { qFlag := c.Bool("q") - for name := range p.Configs { - service, err := p.CreateService(name) - if err != nil { - logrus.Fatal(err) - } - - info, err := service.Info(qFlag) - if err != nil { - logrus.Fatal(err) - } - - allInfo = append(allInfo, info...) + allInfo, err := p.Ps(context.Background(), qFlag, c.Args()...) + if err != nil { + return cli.NewExitError(err.Error(), 1) } - os.Stdout.WriteString(allInfo.String(!qFlag)) + return nil } // ProjectPort prints the public port for a port binding. 
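Note the new action contract above: handlers now receive the `project.APIProject` interface and return an `error`, which `WithProject` propagates to the cli instead of calling `logrus.Fatal`. A hypothetical extra action following that shape (name, package, and wiring are illustrative, not part of the diff):

```go
package commands // hypothetical package

import (
	"os"

	"golang.org/x/net/context"

	"github.com/codegangsta/cli"
	"github.com/docker/libcompose/project"
)

// ProjectList is illustrative only; it would be registered with
// app.WithProject(factory, ProjectList) like the actions above.
func ProjectList(p project.APIProject, c *cli.Context) error {
	allInfo, err := p.Ps(context.Background(), false, c.Args()...)
	if err != nil {
		return cli.NewExitError(err.Error(), 1)
	}
	os.Stdout.WriteString(allInfo.String(true))
	return nil
}
```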
-func ProjectPort(p *project.Project, c *cli.Context) { +func ProjectPort(p project.APIProject, c *cli.Context) error { if len(c.Args()) != 2 { - logrus.Fatalf("Please pass arguments in the form: SERVICE PORT") + return cli.NewExitError("Please pass arguments in the form: SERVICE PORT", 1) } index := c.Int("index") protocol := c.String("protocol") + serviceName := c.Args()[0] + privatePort := c.Args()[1] - service, err := p.CreateService(c.Args()[0]) + port, err := p.Port(context.Background(), index, protocol, serviceName, privatePort) if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) } - - containers, err := service.Containers() - if err != nil { - logrus.Fatal(err) - } - - if index < 1 || index > len(containers) { - logrus.Fatalf("Invalid index %d", index) - } - - output, err := containers[index-1].Port(fmt.Sprintf("%s/%s", c.Args()[1], protocol)) - if err != nil { - logrus.Fatal(err) - } - fmt.Println(output) + fmt.Println(port) + return nil } -// ProjectDown brings all services down. -func ProjectDown(p *project.Project, c *cli.Context) { - err := p.Down(c.Args()...) +// ProjectStop stops all services. +func ProjectStop(p project.APIProject, c *cli.Context) error { + err := p.Stop(context.Background(), c.Int("timeout"), c.Args()...) if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) } + return nil +} + +// ProjectDown brings all services down (stops and clean containers). +func ProjectDown(p project.APIProject, c *cli.Context) error { + options := options.Down{ + RemoveVolume: c.Bool("volumes"), + RemoveImages: options.ImageType(c.String("rmi")), + RemoveOrphans: c.Bool("remove-orphans"), + } + err := p.Down(context.Background(), options, c.Args()...) + if err != nil { + return cli.NewExitError(err.Error(), 1) + } + return nil } // ProjectBuild builds or rebuilds services. -func ProjectBuild(p *project.Project, c *cli.Context) { - err := p.Build(c.Args()...) - if err != nil { - logrus.Fatal(err) +func ProjectBuild(p project.APIProject, c *cli.Context) error { + config := options.Build{ + NoCache: c.Bool("no-cache"), + ForceRemove: c.Bool("force-rm"), + Pull: c.Bool("pull"), } + err := p.Build(context.Background(), config, c.Args()...) + if err != nil { + return cli.NewExitError(err.Error(), 1) + } + return nil } // ProjectCreate creates all services but do not start them. -func ProjectCreate(p *project.Project, c *cli.Context) { - err := p.Create(c.Args()...) - if err != nil { - logrus.Fatal(err) +func ProjectCreate(p project.APIProject, c *cli.Context) error { + options := options.Create{ + NoRecreate: c.Bool("no-recreate"), + ForceRecreate: c.Bool("force-recreate"), + NoBuild: c.Bool("no-build"), } + err := p.Create(context.Background(), options, c.Args()...) + if err != nil { + return cli.NewExitError(err.Error(), 1) + } + return nil } // ProjectUp brings all services up. -func ProjectUp(p *project.Project, c *cli.Context) { - err := p.Up(c.Args()...) +func ProjectUp(p project.APIProject, c *cli.Context) error { + options := options.Up{ + Create: options.Create{ + NoRecreate: c.Bool("no-recreate"), + ForceRecreate: c.Bool("force-recreate"), + NoBuild: c.Bool("no-build"), + }, + } + ctx, cancelFun := context.WithCancel(context.Background()) + err := p.Up(ctx, options, c.Args()...) 
if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) + } + if !c.Bool("d") { + signalChan := make(chan os.Signal, 1) + cleanupDone := make(chan bool) + signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) + errChan := make(chan error) + go func() { + errChan <- p.Log(ctx, true, c.Args()...) + }() + go func() { + select { + case <-signalChan: + fmt.Printf("\nGracefully stopping...\n") + cancelFun() + ProjectStop(p, c) + cleanupDone <- true + case err := <-errChan: + if err != nil { + logrus.Fatal(err) + } + cleanupDone <- true + } + }() + <-cleanupDone + return nil + } + return nil +} + +// ProjectRun runs a given command within a service's container. +func ProjectRun(p project.APIProject, c *cli.Context) error { + if len(c.Args()) == 1 { + logrus.Fatal("No service specified") } - if !c.Bool("d") { - wait() + serviceName := c.Args()[0] + commandParts := c.Args()[1:] + + exitCode, err := p.Run(context.Background(), serviceName, commandParts) + if err != nil { + return cli.NewExitError(err.Error(), 1) } + return cli.NewExitError("", exitCode) } // ProjectStart starts services. -func ProjectStart(p *project.Project, c *cli.Context) { - err := p.Start(c.Args()...) +func ProjectStart(p project.APIProject, c *cli.Context) error { + err := p.Start(context.Background(), c.Args()...) if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) } + return nil } // ProjectRestart restarts services. -func ProjectRestart(p *project.Project, c *cli.Context) { - err := p.Restart(c.Args()...) +func ProjectRestart(p project.APIProject, c *cli.Context) error { + err := p.Restart(context.Background(), c.Int("timeout"), c.Args()...) if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) } + return nil } // ProjectLog gets services logs. -func ProjectLog(p *project.Project, c *cli.Context) { - err := p.Log(c.Args()...) +func ProjectLog(p project.APIProject, c *cli.Context) error { + err := p.Log(context.Background(), c.Bool("follow"), c.Args()...) if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) } - wait() + return nil } // ProjectPull pulls images for services. -func ProjectPull(p *project.Project, c *cli.Context) { - err := p.Pull(c.Args()...) - if err != nil { - logrus.Fatal(err) +func ProjectPull(p project.APIProject, c *cli.Context) error { + err := p.Pull(context.Background(), c.Args()...) + if err != nil && !c.Bool("ignore-pull-failures") { + return cli.NewExitError(err.Error(), 1) } + return nil } -// ProjectDelete delete services. -func ProjectDelete(p *project.Project, c *cli.Context) { - if !c.Bool("force") && len(c.Args()) == 0 { - logrus.Fatal("Will not remove all services without --force") +// ProjectDelete deletes services. +func ProjectDelete(p project.APIProject, c *cli.Context) error { + options := options.Delete{ + RemoveVolume: c.Bool("v"), } - err := p.Delete(c.Args()...) + if !c.Bool("force") { + options.BeforeDeleteCallback = func(stoppedContainers []string) bool { + fmt.Printf("Going to remove %v\nAre you sure? [yN]\n", strings.Join(stoppedContainers, ", ")) + var answer string + _, err := fmt.Scanln(&answer) + if err != nil { + logrus.Error(err) + return false + } + if answer != "y" && answer != "Y" { + return false + } + return true + } + } + err := p.Delete(context.Background(), options, c.Args()...) if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) } + return nil } // ProjectKill forces stop service containers. 
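`ProjectDelete` above now builds an `options.Delete` and installs an optional confirmation callback instead of refusing to run without `--force`. A sketch of driving the same API programmatically, using the project construction shown in the updated README (project name and compose file are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"golang.org/x/net/context"

	"github.com/docker/libcompose/docker"
	"github.com/docker/libcompose/project"
	"github.com/docker/libcompose/project/options"
)

func main() {
	p, err := docker.NewProject(&docker.Context{
		Context: project.Context{
			ComposeFiles: []string{"docker-compose.yml"},
			ProjectName:  "demo",
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	opts := options.Delete{RemoveVolume: true}
	opts.BeforeDeleteCallback = func(stopped []string) bool {
		fmt.Printf("Going to remove %s\n", strings.Join(stopped, ", "))
		return true // returning false aborts the removal
	}
	if err := p.Delete(context.Background(), opts); err != nil {
		log.Fatal(err)
	}
}
```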
-func ProjectKill(p *project.Project, c *cli.Context) { - err := p.Kill(c.Args()...) +func ProjectKill(p project.APIProject, c *cli.Context) error { + err := p.Kill(context.Background(), c.String("signal"), c.Args()...) if err != nil { - logrus.Fatal(err) + return cli.NewExitError(err.Error(), 1) } + return nil +} + +// ProjectPause pauses service containers. +func ProjectPause(p project.APIProject, c *cli.Context) error { + err := p.Pause(context.Background(), c.Args()...) + if err != nil { + return cli.NewExitError(err.Error(), 1) + } + return nil +} + +// ProjectUnpause unpauses service containers. +func ProjectUnpause(p project.APIProject, c *cli.Context) error { + err := p.Unpause(context.Background(), c.Args()...) + if err != nil { + return cli.NewExitError(err.Error(), 1) + } + return nil } // ProjectScale scales services. -func ProjectScale(p *project.Project, c *cli.Context) { - // This code is a bit verbose but I wanted to parse everything up front - order := make([]string, 0, 0) - serviceScale := make(map[string]int) - services := make(map[string]project.Service) - +func ProjectScale(p project.APIProject, c *cli.Context) error { + servicesScale := map[string]int{} for _, arg := range c.Args() { kv := strings.SplitN(arg, "=", 2) if len(kv) != 2 { - logrus.Fatalf("Invalid scale parameter: %s", arg) + return cli.NewExitError(fmt.Sprintf("Invalid scale parameter: %s", arg), 2) } name := kv[0] count, err := strconv.Atoi(kv[1]) if err != nil { - logrus.Fatalf("Invalid scale parameter: %v", err) + return cli.NewExitError(fmt.Sprintf("Invalid scale parameter: %v", err), 2) } - if _, ok := p.Configs[name]; !ok { - logrus.Fatalf("%s is not defined in the template", name) - } - - service, err := p.CreateService(name) - if err != nil { - logrus.Fatalf("Failed to lookup service: %s: %v", service, err) - } - - order = append(order, name) - serviceScale[name] = count - services[name] = service + servicesScale[name] = count } - for _, name := range order { - scale := serviceScale[name] - logrus.Infof("Setting scale %s=%d...", name, scale) - err := services[name].Scale(scale) - if err != nil { - logrus.Fatalf("Failed to set the scale %s=%d: %v", name, scale, err) - } + err := p.Scale(context.Background(), c.Int("timeout"), servicesScale) + if err != nil { + return cli.NewExitError(err.Error(), 1) } -} - -func wait() { - <-make(chan interface{}) + return nil } diff --git a/vendor/github.com/docker/libcompose/cli/app/types.go b/vendor/github.com/docker/libcompose/cli/app/types.go index 71920c8a..a8c3f3ce 100644 --- a/vendor/github.com/docker/libcompose/cli/app/types.go +++ b/vendor/github.com/docker/libcompose/cli/app/types.go @@ -8,5 +8,5 @@ import ( // ProjectFactory is an interface that helps creating libcompose project. type ProjectFactory interface { // Create creates a libcompose project from the command line options (codegangsta cli context). 
- Create(c *cli.Context) (*project.Project, error) + Create(c *cli.Context) (project.APIProject, error) } diff --git a/vendor/github.com/docker/libcompose/cli/app/version.go b/vendor/github.com/docker/libcompose/cli/app/version.go new file mode 100644 index 00000000..0eef8d7d --- /dev/null +++ b/vendor/github.com/docker/libcompose/cli/app/version.go @@ -0,0 +1,52 @@ +package app + +import ( + "fmt" + "os" + "runtime" + "text/template" + + "github.com/Sirupsen/logrus" + "github.com/codegangsta/cli" + "github.com/docker/libcompose/version" +) + +var versionTemplate = `Version: {{.Version}} ({{.GitCommit}}) +Go version: {{.GoVersion}} +Built: {{.BuildTime}} +OS/Arch: {{.Os}}/{{.Arch}}` + +// Version prints the libcompose version number and additionnal informations. +func Version(c *cli.Context) { + if c.Bool("short") { + fmt.Println(version.VERSION) + return + } + + tmpl, err := template.New("").Parse(versionTemplate) + if err != nil { + logrus.Fatal(err) + } + + v := struct { + Version string + GitCommit string + GoVersion string + BuildTime string + Os string + Arch string + }{ + Version: version.VERSION, + GitCommit: version.GITCOMMIT, + GoVersion: runtime.Version(), + BuildTime: version.BUILDTIME, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + } + + if err := tmpl.Execute(os.Stdout, v); err != nil { + logrus.Fatal(err) + } + fmt.Printf("\n") + return +} diff --git a/vendor/github.com/docker/libcompose/cli/command/command.go b/vendor/github.com/docker/libcompose/cli/command/command.go index d999abd3..a4ccff08 100644 --- a/vendor/github.com/docker/libcompose/cli/command/command.go +++ b/vendor/github.com/docker/libcompose/cli/command/command.go @@ -3,7 +3,6 @@ package command import ( "github.com/codegangsta/cli" "github.com/docker/libcompose/cli/app" - "github.com/docker/libcompose/project" ) // CreateCommand defines the libcompose create subcommand. @@ -12,6 +11,20 @@ func CreateCommand(factory app.ProjectFactory) cli.Command { Name: "create", Usage: "Create all services but do not start", Action: app.WithProject(factory, app.ProjectCreate), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "no-recreate", + Usage: "If containers already exist, don't recreate them. Incompatible with --force-recreate.", + }, + cli.BoolFlag{ + Name: "force-recreate", + Usage: "Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.", + }, + cli.BoolFlag{ + Name: "no-build", + Usage: "Don't build an image, even if it's missing.", + }, + }, } } @@ -21,6 +34,20 @@ func BuildCommand(factory app.ProjectFactory) cli.Command { Name: "build", Usage: "Build or rebuild services.", Action: app.WithProject(factory, app.ProjectBuild), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "no-cache", + Usage: "Do not use cache when building the image", + }, + cli.BoolFlag{ + Name: "force-rm", + Usage: "Always remove intermediate containers", + }, + cli.BoolFlag{ + Name: "pull", + Usage: "Always attempt to pull a newer version of the image", + }, + }, } } @@ -71,6 +98,10 @@ func UpCommand(factory app.ProjectFactory) cli.Command { Name: "d", Usage: "Do not block and log", }, + cli.BoolFlag{ + Name: "no-build", + Usage: "Don't build an image, even if it's missing.", + }, cli.BoolFlag{ Name: "no-recreate", Usage: "If containers already exist, don't recreate them. Incompatible with --force-recreate.", @@ -98,12 +129,28 @@ func StartCommand(factory app.ProjectFactory) cli.Command { } } +// RunCommand defines the libcompose run subcommand. 
+func RunCommand(factory app.ProjectFactory) cli.Command { + return cli.Command{ + Name: "run", + Usage: "Run a one-off command", + Action: app.WithProject(factory, app.ProjectRun), + Flags: []cli.Flag{}, + } +} + // PullCommand defines the libcompose pull subcommand. func PullCommand(factory app.ProjectFactory) cli.Command { return cli.Command{ Name: "pull", Usage: "Pulls images for services", Action: app.WithProject(factory, app.ProjectPull), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "ignore-pull-failures", + Usage: "Pull what it can and ignores images with pull failures.", + }, + }, } } @@ -119,6 +166,10 @@ func LogsCommand(factory app.ProjectFactory) cli.Command { Usage: "number of lines to tail", Value: 100, }, + cli.BoolFlag{ + Name: "follow", + Usage: "Follow log output.", + }, }, } } @@ -142,10 +193,9 @@ func RestartCommand(factory app.ProjectFactory) cli.Command { // StopCommand defines the libcompose stop subcommand. func StopCommand(factory app.ProjectFactory) cli.Command { return cli.Command{ - Name: "stop", - ShortName: "down", - Usage: "Stop services", - Action: app.WithProject(factory, app.ProjectDown), + Name: "stop", + Usage: "Stop services", + Action: app.WithProject(factory, app.ProjectStop), Flags: []cli.Flag{ cli.IntFlag{ Name: "timeout,t", @@ -156,6 +206,29 @@ func StopCommand(factory app.ProjectFactory) cli.Command { } } +// DownCommand defines the libcompose stop subcommand. +func DownCommand(factory app.ProjectFactory) cli.Command { + return cli.Command{ + Name: "down", + Usage: "Stop and remove containers, networks, images, and volumes", + Action: app.WithProject(factory, app.ProjectDown), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "volumes,v", + Usage: "Remove data volumes", + }, + cli.StringFlag{ + Name: "rmi", + Usage: "Remove images, type may be one of: 'all' to remove all images, or 'local' to remove only images that don't have an custom name set by the `image` field", + }, + cli.BoolFlag{ + Name: "remove-orphans", + Usage: "Remove containers for services not defined in the Compose file", + }, + }, + } +} + // ScaleCommand defines the libcompose scale subcommand. func ScaleCommand(factory app.ProjectFactory) cli.Command { return cli.Command{ @@ -207,41 +280,57 @@ func KillCommand(factory app.ProjectFactory) cli.Command { } } +// PauseCommand defines the libcompose pause subcommand. +func PauseCommand(factory app.ProjectFactory) cli.Command { + return cli.Command{ + Name: "pause", + Usage: "Pause services.", + // ArgsUsage: "[SERVICE...]", + Action: app.WithProject(factory, app.ProjectPause), + } +} + +// UnpauseCommand defines the libcompose unpause subcommand. +func UnpauseCommand(factory app.ProjectFactory) cli.Command { + return cli.Command{ + Name: "unpause", + Usage: "Unpause services.", + // ArgsUsage: "[SERVICE...]", + Action: app.WithProject(factory, app.ProjectUnpause), + } +} + +// VersionCommand defines the libcompose version subcommand. +func VersionCommand(factory app.ProjectFactory) cli.Command { + return cli.Command{ + Name: "version", + Usage: "Show version informations", + Action: app.Version, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "short", + Usage: "Shows only Compose's version number.", + }, + }, + } +} + // CommonFlags defines the flags that are in common for all subcommands. 
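`DownCommand` above maps `--volumes`, `--rmi`, and `--remove-orphans` onto the new `options.Down` struct. A hypothetical helper invoking the same API directly (package name is illustrative):

```go
package compose // hypothetical package

import (
	"golang.org/x/net/context"

	"github.com/docker/libcompose/project"
	"github.com/docker/libcompose/project/options"
)

// downAll mirrors the flags that DownCommand declares above.
func downAll(p project.APIProject) error {
	opts := options.Down{
		RemoveVolume:  true,                       // --volumes
		RemoveImages:  options.ImageType("local"), // --rmi: "all" or "local"
		RemoveOrphans: true,                       // --remove-orphans
	}
	return p.Down(context.Background(), opts)
}
```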
func CommonFlags() []cli.Flag { return []cli.Flag{ cli.BoolFlag{ Name: "verbose,debug", }, - cli.StringFlag{ + cli.StringSliceFlag{ Name: "file,f", - Usage: "Specify an alternate compose file (default: docker-compose.yml)", - Value: "docker-compose.yml", + Usage: "Specify one or more alternate compose files (default: docker-compose.yml)", + Value: &cli.StringSlice{}, EnvVar: "COMPOSE_FILE", }, cli.StringFlag{ - Name: "project-name,p", - Usage: "Specify an alternate project name (default: directory name)", + Name: "project-name,p", + Usage: "Specify an alternate project name (default: directory name)", + EnvVar: "COMPOSE_PROJECT_NAME", }, } } - -// Populate updates the specified project context based on command line arguments and subcommands. -func Populate(context *project.Context, c *cli.Context) { - context.ComposeFile = c.GlobalString("file") - context.ProjectName = c.GlobalString("project-name") - - if c.Command.Name == "logs" { - context.Log = true - } else if c.Command.Name == "up" { - context.Log = !c.Bool("d") - context.NoRecreate = c.Bool("no-recreate") - context.ForceRecreate = c.Bool("force-recreate") - } else if c.Command.Name == "stop" || c.Command.Name == "restart" || c.Command.Name == "scale" { - context.Timeout = uint(c.Int("timeout")) - } else if c.Command.Name == "kill" { - context.Signal = c.Int("signal") - } else if c.Command.Name == "rm" { - context.Volume = c.Bool("v") - } -} diff --git a/vendor/github.com/docker/libcompose/cli/docker/app/commands.go b/vendor/github.com/docker/libcompose/cli/docker/app/commands.go index 88b891cd..40d2e7a2 100644 --- a/vendor/github.com/docker/libcompose/cli/docker/app/commands.go +++ b/vendor/github.com/docker/libcompose/cli/docker/app/commands.go @@ -4,6 +4,8 @@ import ( "github.com/Sirupsen/logrus" "github.com/codegangsta/cli" "github.com/docker/libcompose/docker" + "github.com/docker/libcompose/docker/client" + "github.com/docker/libcompose/project" ) // DockerClientFlags defines the flags that are specific to the docker client, @@ -42,14 +44,14 @@ func DockerClientFlags() []cli.Flag { func Populate(context *docker.Context, c *cli.Context) { context.ConfigDir = c.String("configdir") - opts := docker.ClientOpts{} + opts := client.Options{} opts.TLS = c.GlobalBool("tls") opts.TLSVerify = c.GlobalBool("tlsverify") opts.TLSOptions.CAFile = c.GlobalString("tlscacert") opts.TLSOptions.CertFile = c.GlobalString("tlscert") opts.TLSOptions.KeyFile = c.GlobalString("tlskey") - clientFactory, err := docker.NewDefaultClientFactory(opts) + clientFactory, err := project.NewDefaultClientFactory(opts) if err != nil { logrus.Fatalf("Failed to construct Docker client: %v", err) } diff --git a/vendor/github.com/docker/libcompose/cli/docker/app/factory.go b/vendor/github.com/docker/libcompose/cli/docker/app/factory.go index a1e4b2df..2614e529 100644 --- a/vendor/github.com/docker/libcompose/cli/docker/app/factory.go +++ b/vendor/github.com/docker/libcompose/cli/docker/app/factory.go @@ -1,23 +1,34 @@ package app import ( + "os" + "github.com/codegangsta/cli" - "github.com/docker/libcompose/cli/command" "github.com/docker/libcompose/cli/logger" "github.com/docker/libcompose/docker" "github.com/docker/libcompose/project" ) -// ProjectFactory is a struct that hold the app.ProjectFactory implementation. +// ProjectFactory is a struct that holds the app.ProjectFactory implementation. type ProjectFactory struct { } // Create implements ProjectFactory.Create using docker client. 
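`Populate` above now fills the relocated `client.Options` (formerly `docker.ClientOpts`) and obtains the factory from `project.NewDefaultClientFactory`. A sketch of the same wiring outside the CLI (certificate paths are illustrative; attaching the factory to the `docker.Context` is elided):

```go
package compose // hypothetical package

import (
	"github.com/docker/libcompose/docker/client"
	"github.com/docker/libcompose/project"
)

// configureTLS builds the relocated client.Options by hand.
func configureTLS() error {
	opts := client.Options{}
	opts.TLS = true
	opts.TLSVerify = true
	opts.TLSOptions.CAFile = "/certs/ca.pem"
	opts.TLSOptions.CertFile = "/certs/cert.pem"
	opts.TLSOptions.KeyFile = "/certs/key.pem"

	clientFactory, err := project.NewDefaultClientFactory(opts)
	if err != nil {
		return err
	}
	_ = clientFactory // would be handed to the docker.Context, as Populate does
	return nil
}
```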
-func (p *ProjectFactory) Create(c *cli.Context) (*project.Project, error) { +func (p *ProjectFactory) Create(c *cli.Context) (project.APIProject, error) { context := &docker.Context{} context.LoggerFactory = logger.NewColorLoggerFactory() Populate(context, c) - command.Populate(&context.Context, c) - return docker.NewProject(context) + context.ComposeFiles = c.GlobalStringSlice("file") + + if len(context.ComposeFiles) == 0 { + context.ComposeFiles = []string{"docker-compose.yml"} + if _, err := os.Stat("docker-compose.override.yml"); err == nil { + context.ComposeFiles = append(context.ComposeFiles, "docker-compose.override.yml") + } + } + + context.ProjectName = c.GlobalString("project-name") + + return docker.NewProject(context, nil) } diff --git a/vendor/github.com/docker/libcompose/config/convert.go b/vendor/github.com/docker/libcompose/config/convert.go new file mode 100644 index 00000000..9cb8c5b1 --- /dev/null +++ b/vendor/github.com/docker/libcompose/config/convert.go @@ -0,0 +1,41 @@ +package config + +import "github.com/docker/libcompose/utils" + +// ConvertServices converts a set of v1 service configs to v2 service configs +func ConvertServices(v1Services map[string]*ServiceConfigV1) (map[string]*ServiceConfig, error) { + v2Services := make(map[string]*ServiceConfig) + replacementFields := make(map[string]*ServiceConfig) + + for name, service := range v1Services { + replacementFields[name] = &ServiceConfig{ + Build: Build{ + Context: service.Build, + Dockerfile: service.Dockerfile, + }, + Logging: Log{ + Driver: service.LogDriver, + Options: service.LogOpt, + }, + NetworkMode: service.Net, + } + + v1Services[name].Build = "" + v1Services[name].Dockerfile = "" + v1Services[name].LogDriver = "" + v1Services[name].LogOpt = nil + v1Services[name].Net = "" + } + + if err := utils.Convert(v1Services, &v2Services); err != nil { + return nil, err + } + + for name := range v2Services { + v2Services[name].Build = replacementFields[name].Build + v2Services[name].Logging = replacementFields[name].Logging + v2Services[name].NetworkMode = replacementFields[name].NetworkMode + } + + return v2Services, nil +} diff --git a/vendor/github.com/docker/libcompose/project/hash.go b/vendor/github.com/docker/libcompose/config/hash.go similarity index 71% rename from vendor/github.com/docker/libcompose/project/hash.go rename to vendor/github.com/docker/libcompose/config/hash.go index f30182a0..a2f7f04a 100644 --- a/vendor/github.com/docker/libcompose/project/hash.go +++ b/vendor/github.com/docker/libcompose/config/hash.go @@ -1,4 +1,4 @@ -package project +package config import ( "crypto/sha1" @@ -7,6 +7,8 @@ import ( "io" "reflect" "sort" + + "github.com/docker/libcompose/yaml" ) // GetServiceHash computes and returns a hash that will identify a service. 
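GetServiceHash, whose body follows in the next hunk, is what callers compare to decide whether a service's configuration changed and its container should be recreated. A minimal, hypothetical sketch of computing it (assumes the yaml helper types are the plain slice/map aliases used in the hunk below):

    package main

    import (
        "fmt"

        "github.com/docker/libcompose/config"
        "github.com/docker/libcompose/yaml"
    )

    func main() {
        web := &config.ServiceConfig{
            Image:   "nginx:1.9",
            Command: yaml.Command{"nginx", "-g", "daemon off;"},
            Labels:  yaml.SliceorMap{"io.example.role": "frontend"},
        }
        // Equal configs hash equally; a changed hash signals a stale container.
        fmt.Println(config.GetServiceHash("web", web))
    }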
@@ -45,49 +47,36 @@ func GetServiceHash(name string, config *ServiceConfig) string { io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey)) switch s := serviceValue.(type) { - case SliceorMap: + case yaml.SliceorMap: sliceKeys := []string{} - for lkey := range s.MapParts() { + for lkey := range s { sliceKeys = append(sliceKeys, lkey) } sort.Strings(sliceKeys) for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s.MapParts()[sliceKey])) + io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s[sliceKey])) } - case MaporEqualSlice: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { + case yaml.MaporEqualSlice: + for _, sliceKey := range s { io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) } - case MaporColonSlice: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { + case yaml.MaporColonSlice: + for _, sliceKey := range s { io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) } - case MaporSpaceSlice: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { + case yaml.MaporSpaceSlice: + for _, sliceKey := range s { io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) } - case Command: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { + case yaml.Command: + for _, sliceKey := range s { io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) } - case Stringorslice: - sliceKeys := s.Slice() - sort.Strings(sliceKeys) + case yaml.Stringorslice: + sort.Strings(s) - for _, sliceKey := range sliceKeys { + for _, sliceKey := range s { io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) } case []string: diff --git a/vendor/github.com/docker/libcompose/project/interpolation.go b/vendor/github.com/docker/libcompose/config/interpolation.go similarity index 88% rename from vendor/github.com/docker/libcompose/project/interpolation.go rename to vendor/github.com/docker/libcompose/config/interpolation.go index 6c79fade..fc420de9 100644 --- a/vendor/github.com/docker/libcompose/project/interpolation.go +++ b/vendor/github.com/docker/libcompose/config/interpolation.go @@ -1,9 +1,8 @@ -package project +package config import ( "bytes" "fmt" - "strconv" "strings" "github.com/Sirupsen/logrus" @@ -108,21 +107,11 @@ func parseConfig(option, service string, data *interface{}, mapping func(string) case string: var success bool - interpolatedLine, success := parseLine(typedData, mapping) + *data, success = parseLine(typedData, mapping) if !success { return fmt.Errorf("Invalid interpolation format for \"%s\" option in service \"%s\": \"%s\"", option, service, typedData) } - - // If possible, convert the value to an integer - // If the type should be a string and not an int, go-yaml will convert it back into a string - lineAsInteger, err := strconv.Atoi(interpolatedLine) - - if err == nil { - *data = lineAsInteger - } else { - *data = interpolatedLine - } case []interface{}: for k, v := range typedData { err := parseConfig(option, service, &v, mapping) @@ -148,7 +137,8 @@ func parseConfig(option, service string, data *interface{}, mapping func(string) return nil } -func interpolate(environmentLookup EnvironmentLookup, config *rawServiceMap) error { +// Interpolate replaces variables in the raw map representation of the project file +func Interpolate(environmentLookup EnvironmentLookup, config *RawServiceMap) error { for k, v := range *config { for k2, v2 := 
range v {
 			err := parseConfig(k2, k, &v2, func(s string) string {
diff --git a/vendor/github.com/docker/libcompose/config/merge.go b/vendor/github.com/docker/libcompose/config/merge.go
new file mode 100644
index 00000000..586acd66
--- /dev/null
+++ b/vendor/github.com/docker/libcompose/config/merge.go
@@ -0,0 +1,166 @@
+package config
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strings"
+
+	yaml "github.com/cloudfoundry-incubator/candiedyaml"
+	"github.com/docker/docker/pkg/urlutil"
+)
+
+var (
+	noMerge = []string{
+		"links",
+		"volumes_from",
+	}
+	defaultParseOptions = ParseOptions{
+		Interpolate: true,
+		Validate:    true,
+	}
+)
+
+// Merge merges a compose file into an existing set of service configs
+func Merge(existingServices *ServiceConfigs, environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte, options *ParseOptions) (map[string]*ServiceConfig, map[string]*VolumeConfig, map[string]*NetworkConfig, error) {
+	if options == nil {
+		options = &defaultParseOptions
+	}
+
+	var config Config
+	if err := yaml.Unmarshal(bytes, &config); err != nil {
+		return nil, nil, nil, err
+	}
+
+	var serviceConfigs map[string]*ServiceConfig
+	var volumeConfigs map[string]*VolumeConfig
+	var networkConfigs map[string]*NetworkConfig
+	if config.Version == "2" {
+		var err error
+		serviceConfigs, err = MergeServicesV2(existingServices, environmentLookup, resourceLookup, file, bytes, options)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		volumeConfigs, err = ParseVolumes(bytes)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		networkConfigs, err = ParseNetworks(bytes)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+	} else {
+		serviceConfigsV1, err := MergeServicesV1(existingServices, environmentLookup, resourceLookup, file, bytes, options)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		serviceConfigs, err = ConvertServices(serviceConfigsV1)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+	}
+
+	adjustValues(serviceConfigs)
+
+	if options.Postprocess != nil {
+		var err error
+		serviceConfigs, err = options.Postprocess(serviceConfigs)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+	}
+
+	return serviceConfigs, volumeConfigs, networkConfigs, nil
+}
+
+func adjustValues(configs map[string]*ServiceConfig) {
+	// yaml parser turns "no" into "false" but that is not valid for a restart policy
+	for _, v := range configs {
+		if v.Restart == "false" {
+			v.Restart = "no"
+		}
+	}
+}
+
+func readEnvFile(resourceLookup ResourceLookup, inFile string, serviceData RawService) (RawService, error) {
+	if _, ok := serviceData["env_file"]; !ok {
+		return serviceData, nil
+	}
+	envFiles := serviceData["env_file"].([]interface{})
+	if len(envFiles) == 0 {
+		return serviceData, nil
+	}
+
+	if resourceLookup == nil {
+		return nil, fmt.Errorf("Cannot use env_file in file %s: no mechanism provided to load files", inFile)
+	}
+
+	var vars []interface{}
+	if _, ok := serviceData["environment"]; ok {
+		vars = serviceData["environment"].([]interface{})
+	}
+
+	for i := len(envFiles) - 1; i >= 0; i-- {
+		envFile := envFiles[i].(string)
+		content, _, err := resourceLookup.Lookup(envFile, inFile)
+		if err != nil {
+			return nil, err
+		}
+
+		scanner := bufio.NewScanner(bytes.NewBuffer(content))
+		for scanner.Scan() {
+			line := strings.TrimSpace(scanner.Text())
+			key := strings.SplitAfter(line, "=")[0]
+
+			found := false
+			for _, v := range vars {
+				if strings.HasPrefix(v.(string), key) {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				vars = append(vars, line)
+			}
+		}
+
+		if scanner.Err() != nil {
+			return nil, scanner.Err()
+		}
+	}
+
+	serviceData["environment"] = vars
+
+	delete(serviceData, "env_file")
+
+	return serviceData, nil
+}
+
+func mergeConfig(baseService, serviceData RawService) RawService {
+	for k, v := range serviceData {
+		// Image and build are mutually exclusive in merge
+		if k == "image" {
+			delete(baseService, "build")
+		} else if k == "build" {
+			delete(baseService, "image")
+		}
+		existing, ok := baseService[k]
+		if ok {
+			baseService[k] = merge(existing, v)
+		} else {
+			baseService[k] = v
+		}
+	}
+
+	return baseService
+}
+
+// IsValidRemote checks if the specified string is a valid remote (for builds)
+func IsValidRemote(remote string) bool {
+	return urlutil.IsGitURL(remote) || urlutil.IsURL(remote)
+}
diff --git a/vendor/github.com/docker/libcompose/config/merge_v1.go b/vendor/github.com/docker/libcompose/config/merge_v1.go
new file mode 100644
index 00000000..c3bf8545
--- /dev/null
+++ b/vendor/github.com/docker/libcompose/config/merge_v1.go
@@ -0,0 +1,199 @@
+package config
+
+import (
+	"fmt"
+	"path"
+
+	"github.com/Sirupsen/logrus"
+	yaml "github.com/cloudfoundry-incubator/candiedyaml"
+	"github.com/docker/libcompose/utils"
+)
+
+// MergeServicesV1 merges a v1 compose file into an existing set of service configs
+func MergeServicesV1(existingServices *ServiceConfigs, environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte, options *ParseOptions) (map[string]*ServiceConfigV1, error) {
+	datas := make(RawServiceMap)
+	if err := yaml.Unmarshal(bytes, &datas); err != nil {
+		return nil, err
+	}
+
+	if options.Interpolate {
+		if err := Interpolate(environmentLookup, &datas); err != nil {
+			return nil, err
+		}
+	}
+
+	if options.Preprocess != nil {
+		var err error
+		datas, err = options.Preprocess(datas)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if options.Validate {
+		if err := validate(datas); err != nil {
+			return nil, err
+		}
+	}
+
+	for name, data := range datas {
+		data, err := parseV1(resourceLookup, environmentLookup, file, data, datas, options)
+		if err != nil {
+			logrus.Errorf("Failed to parse service %s: %v", name, err)
+			return nil, err
+		}
+
+		if serviceConfig, ok := existingServices.Get(name); ok {
+			var rawExistingService RawService
+			if err := utils.Convert(serviceConfig, &rawExistingService); err != nil {
+				return nil, err
+			}
+
+			data = mergeConfig(rawExistingService, data)
+		}
+
+		datas[name] = data
+	}
+
+	if options.Validate {
+		for name, data := range datas {
+			err := validateServiceConstraints(data, name)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	serviceConfigs := make(map[string]*ServiceConfigV1)
+	if err := utils.Convert(datas, &serviceConfigs); err != nil {
+		return nil, err
+	}
+
+	return serviceConfigs, nil
+}
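MergeServicesV1 yields v1 structs; ConvertServices (in convert.go above) then lifts them into the v2 shape. A hypothetical, minimal example of that conversion, assuming the field names in this diff:

    package main

    import (
        "fmt"

        "github.com/docker/libcompose/config"
    )

    func main() {
        v1 := map[string]*config.ServiceConfigV1{
            "web": {
                Build:      "./web",
                Dockerfile: "Dockerfile.dev",
                Net:        "host",
            },
        }
        // build/dockerfile collapse into the v2 Build struct; net becomes network_mode.
        v2, err := config.ConvertServices(v1)
        if err != nil {
            panic(err)
        }
        fmt.Println(v2["web"].Build.Context, v2["web"].Build.Dockerfile, v2["web"].NetworkMode)
    }

+
+func parseV1(resourceLookup ResourceLookup, environmentLookup EnvironmentLookup, inFile string, serviceData RawService, datas RawServiceMap, options *ParseOptions) (RawService, error) {
+	serviceData, err := readEnvFile(resourceLookup, inFile, serviceData)
+	if err != nil {
+		return nil, err
+	}
+
+	serviceData = resolveContextV1(inFile, serviceData)
+
+	value, ok := serviceData["extends"]
+	if !ok {
+		return serviceData, nil
+	}
+
+	mapValue, ok := value.(map[interface{}]interface{})
+	if !ok {
+		return serviceData, nil
+	}
+
+	if resourceLookup == nil {
+		return nil, fmt.Errorf("Cannot use extends in file %s: no mechanism provided to load files", inFile)
+	}
+
+	file := asString(mapValue["file"])
+	service := 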
asString(mapValue["service"]) + + if service == "" { + return serviceData, nil + } + + var baseService RawService + + if file == "" { + if serviceData, ok := datas[service]; ok { + baseService, err = parseV1(resourceLookup, environmentLookup, inFile, serviceData, datas, options) + } else { + return nil, fmt.Errorf("Failed to find service %s to extend", service) + } + } else { + bytes, resolved, err := resourceLookup.Lookup(file, inFile) + if err != nil { + logrus.Errorf("Failed to lookup file %s: %v", file, err) + return nil, err + } + + var baseRawServices RawServiceMap + if err := yaml.Unmarshal(bytes, &baseRawServices); err != nil { + return nil, err + } + + if options.Interpolate { + err = Interpolate(environmentLookup, &baseRawServices) + if err != nil { + return nil, err + } + } + + if options.Preprocess != nil { + var err error + baseRawServices, err = options.Preprocess(baseRawServices) + if err != nil { + return nil, err + } + } + + if options.Validate { + if err := validate(baseRawServices); err != nil { + return nil, err + } + } + + baseService, ok = baseRawServices[service] + if !ok { + return nil, fmt.Errorf("Failed to find service %s in file %s", service, file) + } + + baseService, err = parseV1(resourceLookup, environmentLookup, resolved, baseService, baseRawServices, options) + } + + if err != nil { + return nil, err + } + + baseService = clone(baseService) + + logrus.Debugf("Merging %#v, %#v", baseService, serviceData) + + for _, k := range noMerge { + if _, ok := baseService[k]; ok { + source := file + if source == "" { + source = inFile + } + return nil, fmt.Errorf("Cannot extend service '%s' in %s: services with '%s' cannot be extended", service, source, k) + } + } + + baseService = mergeConfig(baseService, serviceData) + + logrus.Debugf("Merged result %#v", baseService) + + return baseService, nil +} + +func resolveContextV1(inFile string, serviceData RawService) RawService { + context := asString(serviceData["build"]) + if context == "" { + return serviceData + } + + if IsValidRemote(context) { + return serviceData + } + + current := path.Dir(inFile) + + if context == "." 
{
+		context = current
+	} else {
+		current = path.Join(current, context)
+	}
+
+	serviceData["build"] = current
+
+	return serviceData
+}
diff --git a/vendor/github.com/docker/libcompose/config/merge_v2.go b/vendor/github.com/docker/libcompose/config/merge_v2.go
new file mode 100644
index 00000000..a2fe4daa
--- /dev/null
+++ b/vendor/github.com/docker/libcompose/config/merge_v2.go
@@ -0,0 +1,210 @@
+package config
+
+import (
+	"fmt"
+	"path"
+
+	"github.com/Sirupsen/logrus"
+	yaml "github.com/cloudfoundry-incubator/candiedyaml"
+	"github.com/docker/libcompose/utils"
+)
+
+// MergeServicesV2 merges a v2 compose file into an existing set of service configs
+func MergeServicesV2(existingServices *ServiceConfigs, environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte, options *ParseOptions) (map[string]*ServiceConfig, error) {
+	var config Config
+	if err := yaml.Unmarshal(bytes, &config); err != nil {
+		return nil, err
+	}
+
+	datas := config.Services
+
+	if options.Interpolate {
+		if err := Interpolate(environmentLookup, &datas); err != nil {
+			return nil, err
+		}
+	}
+
+	if options.Preprocess != nil {
+		var err error
+		datas, err = options.Preprocess(datas)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for name, data := range datas {
+		data, err := parseV2(resourceLookup, environmentLookup, file, data, datas, options)
+		if err != nil {
+			logrus.Errorf("Failed to parse service %s: %v", name, err)
+			return nil, err
+		}
+
+		if serviceConfig, ok := existingServices.Get(name); ok {
+			var rawExistingService RawService
+			if err := utils.Convert(serviceConfig, &rawExistingService); err != nil {
+				return nil, err
+			}
+
+			data = mergeConfig(rawExistingService, data)
+		}
+
+		datas[name] = data
+	}
+
+	serviceConfigs := make(map[string]*ServiceConfig)
+	if err := utils.Convert(datas, &serviceConfigs); err != nil {
+		return nil, err
+	}
+
+	return serviceConfigs, nil
+}
+
+// ParseVolumes parses volumes in a compose file
+func ParseVolumes(bytes []byte) (map[string]*VolumeConfig, error) {
+	volumeConfigs := make(map[string]*VolumeConfig)
+
+	var config Config
+	if err := yaml.Unmarshal(bytes, &config); err != nil {
+		return nil, err
+	}
+
+	if err := utils.Convert(config.Volumes, &volumeConfigs); err != nil {
+		return nil, err
+	}
+
+	return volumeConfigs, nil
+}
+
+// ParseNetworks parses networks in a compose file
+func ParseNetworks(bytes []byte) (map[string]*NetworkConfig, error) {
+	networkConfigs := make(map[string]*NetworkConfig)
+
+	var config Config
+	if err := yaml.Unmarshal(bytes, &config); err != nil {
+		return nil, err
+	}
+
+	if err := utils.Convert(config.Networks, &networkConfigs); err != nil {
+		return nil, err
+	}
+
+	return networkConfigs, nil
+}
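Merge (in merge.go above) is the single entry point that dispatches to MergeServicesV1 or MergeServicesV2 and also picks up volumes and networks. A hypothetical, minimal caller, assuming a compose file that uses no ${VAR} interpolation, env_file, or extends (so the nil lookups are never consulted):

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/docker/libcompose/config"
    )

    func main() {
        bytes, err := ioutil.ReadFile("docker-compose.yml")
        if err != nil {
            panic(err)
        }
        // nil options fall back to defaultParseOptions (Interpolate and Validate on).
        services, volumes, networks, err := config.Merge(
            config.NewServiceConfigs(), nil, nil, "docker-compose.yml", bytes, nil)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(services), len(volumes), len(networks))
    }

+
+func parseV2(resourceLookup ResourceLookup, environmentLookup EnvironmentLookup, inFile string, serviceData RawService, datas RawServiceMap, options *ParseOptions) (RawService, error) {
+	serviceData, err := readEnvFile(resourceLookup, inFile, serviceData)
+	if err != nil {
+		return nil, err
+	}
+
+	serviceData = resolveContextV2(inFile, serviceData)
+
+	value, ok := serviceData["extends"]
+	if !ok {
+		return serviceData, nil
+	}
+
+	mapValue, ok := value.(map[interface{}]interface{})
+	if !ok {
+		return serviceData, nil
+	}
+
+	if resourceLookup == nil {
+		return nil, fmt.Errorf("Cannot use extends in file %s: no mechanism provided to load files", inFile)
+	}
+
+	file := asString(mapValue["file"])
+	service := asString(mapValue["service"])
+
+	if service == "" {
+		return serviceData, nil
+	}
+
+	var 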
baseService RawService + + if file == "" { + if serviceData, ok := datas[service]; ok { + baseService, err = parseV2(resourceLookup, environmentLookup, inFile, serviceData, datas, options) + } else { + return nil, fmt.Errorf("Failed to find service %s to extend", service) + } + } else { + bytes, resolved, err := resourceLookup.Lookup(file, inFile) + if err != nil { + logrus.Errorf("Failed to lookup file %s: %v", file, err) + return nil, err + } + + var config Config + if err := yaml.Unmarshal(bytes, &config); err != nil { + return nil, err + } + + baseRawServices := config.Services + + if options.Interpolate { + err = Interpolate(environmentLookup, &baseRawServices) + if err != nil { + return nil, err + } + } + + baseService, ok = baseRawServices[service] + if !ok { + return nil, fmt.Errorf("Failed to find service %s in file %s", service, file) + } + + baseService, err = parseV2(resourceLookup, environmentLookup, resolved, baseService, baseRawServices, options) + } + + if err != nil { + return nil, err + } + + baseService = clone(baseService) + + logrus.Debugf("Merging %#v, %#v", baseService, serviceData) + + for _, k := range noMerge { + if _, ok := baseService[k]; ok { + source := file + if source == "" { + source = inFile + } + return nil, fmt.Errorf("Cannot extend service '%s' in %s: services with '%s' cannot be extended", service, source, k) + } + } + + baseService = mergeConfig(baseService, serviceData) + + logrus.Debugf("Merged result %#v", baseService) + + return baseService, nil +} + +func resolveContextV2(inFile string, serviceData RawService) RawService { + if _, ok := serviceData["build"]; !ok { + return serviceData + } + build := serviceData["build"].(map[interface{}]interface{}) + context := asString(build["context"]) + if context == "" { + return serviceData + } + + if IsValidRemote(context) { + return serviceData + } + + current := path.Dir(inFile) + + if context == "." 
{ + context = current + } else { + current = path.Join(current, context) + } + + build["context"] = current + + return serviceData +} diff --git a/vendor/github.com/docker/libcompose/config/schema.go b/vendor/github.com/docker/libcompose/config/schema.go new file mode 100644 index 00000000..c2b8e171 --- /dev/null +++ b/vendor/github.com/docker/libcompose/config/schema.go @@ -0,0 +1,510 @@ +package config + +var schemaV1 = `{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v1.json", + + "type": "object", + + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + + "additionalProperties": false, + + "definitions": { + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "build": {"type": "string"}, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "cpu_shares": {"type": ["number", "string"]}, + "cpu_quota": {"type": ["number", "string"]}, + "cpuset": {"type": "string"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "dockerfile": {"type": "string"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "extends": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + + "properties": { + "service": {"type": "string"}, + "file": {"type": "string"} + }, + "required": ["service"], + "additionalProperties": false + } + ] + }, + + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "log_driver": {"type": "string"}, + "log_opt": {"type": "object"}, + "mac_address": {"type": "string"}, + "mem_limit": {"type": ["number", "string"]}, + "memswap_limit": {"type": ["number", "string"]}, + "net": {"type": "string"}, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "stdin_open": {"type": "boolean"}, + "stop_signal": {"type": "string"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false 
+ } + ] + } + } + }, + "user": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "volume_driver": {"type": "string"}, + "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + + "dependencies": { + "memswap_limit": ["mem_limit"] + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + { + "required": ["build"], + "not": {"required": ["image"]} + }, + { + "required": ["image"], + "not": {"anyOf": [ + {"required": ["build"]}, + {"required": ["dockerfile"]} + ]} + } + ] + } + } + } +} +` + +var schemaV2 = `{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v2.0.json", + "type": "object", + + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "cpu_shares": {"type": ["number", "string"]}, + "cpu_quota": {"type": ["number", "string"]}, + "cpuset": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "extends": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + + 
"properties": { + "service": {"type": "string"}, + "file": {"type": "string"} + }, + "required": ["service"], + "additionalProperties": false + } + ] + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": {"type": "object"} + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "mem_limit": {"type": ["number", "string"]}, + "memswap_limit": {"type": ["number", "string"]}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "stdin_open": {"type": "boolean"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "volume_driver": {"type": "string"}, + "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + + "dependencies": { + "memswap_limit": ["mem_limit"] + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": "object", + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array" + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "additionalProperties": false + }, + "additionalProperties": 
false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} +` diff --git a/vendor/github.com/docker/libcompose/config/schema_helpers.go b/vendor/github.com/docker/libcompose/config/schema_helpers.go new file mode 100644 index 00000000..08e0ca3d --- /dev/null +++ b/vendor/github.com/docker/libcompose/config/schema_helpers.go @@ -0,0 +1,92 @@ +package config + +import ( + "encoding/json" + "strings" + + "github.com/docker/go-connections/nat" + "github.com/xeipuuv/gojsonschema" +) + +var ( + schemaLoader gojsonschema.JSONLoader + constraintSchemaLoader gojsonschema.JSONLoader + schema map[string]interface{} +) + +type ( + environmentFormatChecker struct{} + portsFormatChecker struct{} +) + +func (checker environmentFormatChecker) IsFormat(input string) bool { + // If the value is a boolean, a warning should be given + // However, we can't determine type since gojsonschema converts the value to a string + // Adding a function with an interface{} parameter to gojsonschema is probably the best way to handle this + return true +} + +func (checker portsFormatChecker) IsFormat(input string) bool { + _, _, err := nat.ParsePortSpecs([]string{input}) + return err == nil +} + +func setupSchemaLoaders() error { + if schema != nil { + return nil + } + + var schemaRaw interface{} + err := json.Unmarshal([]byte(schemaV1), &schemaRaw) + if err != nil { + return err + } + + schema = schemaRaw.(map[string]interface{}) + + gojsonschema.FormatCheckers.Add("environment", environmentFormatChecker{}) + gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) + schemaLoader = gojsonschema.NewGoLoader(schemaRaw) + + definitions := schema["definitions"].(map[string]interface{}) + constraints := definitions["constraints"].(map[string]interface{}) + service := constraints["service"].(map[string]interface{}) + constraintSchemaLoader = gojsonschema.NewGoLoader(service) + + return nil +} + +// gojsonschema doesn't provide a list of valid types for a property +// This parses the schema manually to find all valid types +func parseValidTypesFromSchema(schema map[string]interface{}, context string) []string { + contextSplit := strings.Split(context, ".") + key := contextSplit[len(contextSplit)-1] + + definitions := schema["definitions"].(map[string]interface{}) + service := definitions["service"].(map[string]interface{}) + properties := service["properties"].(map[string]interface{}) + property := properties[key].(map[string]interface{}) + + var validTypes []string + + if val, ok := property["oneOf"]; ok { + validConditions := val.([]interface{}) + + for _, validCondition := range validConditions { + condition := validCondition.(map[string]interface{}) + validTypes = append(validTypes, condition["type"].(string)) + } + } else if val, ok := property["$ref"]; ok { + reference := val.(string) + if reference == 
"#/definitions/string_or_list" { + return []string{"string", "array"} + } else if reference == "#/definitions/list_of_strings" { + return []string{"array"} + } else if reference == "#/definitions/list_or_dict" { + return []string{"array", "object"} + } + } + + return validTypes +} diff --git a/vendor/github.com/docker/libcompose/config/types.go b/vendor/github.com/docker/libcompose/config/types.go new file mode 100644 index 00000000..c8a305d3 --- /dev/null +++ b/vendor/github.com/docker/libcompose/config/types.go @@ -0,0 +1,231 @@ +package config + +import ( + "sync" + + "github.com/docker/libcompose/yaml" +) + +// EnvironmentLookup defines methods to provides environment variable loading. +type EnvironmentLookup interface { + Lookup(key, serviceName string, config *ServiceConfig) []string +} + +// ResourceLookup defines methods to provides file loading. +type ResourceLookup interface { + Lookup(file, relativeTo string) ([]byte, string, error) + ResolvePath(path, inFile string) string +} + +// ServiceConfigV1 holds version 1 of libcompose service configuration +type ServiceConfigV1 struct { + Build string `yaml:"build,omitempty"` + CapAdd []string `yaml:"cap_add,omitempty"` + CapDrop []string `yaml:"cap_drop,omitempty"` + CgroupParent string `yaml:"cgroup_parent,omitempty"` + CPUQuota int64 `yaml:"cpu_quota,omitempty"` + CPUSet string `yaml:"cpuset,omitempty"` + CPUShares int64 `yaml:"cpu_shares,omitempty"` + Command yaml.Command `yaml:"command,flow,omitempty"` + ContainerName string `yaml:"container_name,omitempty"` + Devices []string `yaml:"devices,omitempty"` + DNS yaml.Stringorslice `yaml:"dns,omitempty"` + DNSSearch yaml.Stringorslice `yaml:"dns_search,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty"` + DomainName string `yaml:"domainname,omitempty"` + Entrypoint yaml.Command `yaml:"entrypoint,flow,omitempty"` + EnvFile yaml.Stringorslice `yaml:"env_file,omitempty"` + Environment yaml.MaporEqualSlice `yaml:"environment,omitempty"` + Hostname string `yaml:"hostname,omitempty"` + Image string `yaml:"image,omitempty"` + Labels yaml.SliceorMap `yaml:"labels,omitempty"` + Links yaml.MaporColonSlice `yaml:"links,omitempty"` + LogDriver string `yaml:"log_driver,omitempty"` + MacAddress string `yaml:"mac_address,omitempty"` + MemLimit int64 `yaml:"mem_limit,omitempty"` + MemSwapLimit int64 `yaml:"memswap_limit,omitempty"` + Name string `yaml:"name,omitempty"` + Net string `yaml:"net,omitempty"` + Pid string `yaml:"pid,omitempty"` + Uts string `yaml:"uts,omitempty"` + Ipc string `yaml:"ipc,omitempty"` + Ports []string `yaml:"ports,omitempty"` + Privileged bool `yaml:"privileged,omitempty"` + Restart string `yaml:"restart,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty"` + StdinOpen bool `yaml:"stdin_open,omitempty"` + SecurityOpt []string `yaml:"security_opt,omitempty"` + Tty bool `yaml:"tty,omitempty"` + User string `yaml:"user,omitempty"` + VolumeDriver string `yaml:"volume_driver,omitempty"` + Volumes []string `yaml:"volumes,omitempty"` + VolumesFrom []string `yaml:"volumes_from,omitempty"` + WorkingDir string `yaml:"working_dir,omitempty"` + Expose []string `yaml:"expose,omitempty"` + ExternalLinks []string `yaml:"external_links,omitempty"` + LogOpt map[string]string `yaml:"log_opt,omitempty"` + ExtraHosts []string `yaml:"extra_hosts,omitempty"` + Ulimits yaml.Ulimits `yaml:"ulimits,omitempty"` +} + +// Build holds v2 build information +type Build struct { + Context string `yaml:"context,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty"` + Args 
yaml.MaporEqualSlice `yaml:"args,omitempty"`
+}
+
+// Log holds v2 logging information
+type Log struct {
+	Driver  string            `yaml:"driver,omitempty"`
+	Options map[string]string `yaml:"options,omitempty"`
+}
+
+// ServiceConfig holds version 2 of libcompose service configuration
+type ServiceConfig struct {
+	Build         Build                `yaml:"build,omitempty"`
+	CapAdd        []string             `yaml:"cap_add,omitempty"`
+	CapDrop       []string             `yaml:"cap_drop,omitempty"`
+	CPUSet        string               `yaml:"cpuset,omitempty"`
+	CPUShares     int64                `yaml:"cpu_shares,omitempty"`
+	CPUQuota      int64                `yaml:"cpu_quota,omitempty"`
+	Command       yaml.Command         `yaml:"command,flow,omitempty"`
+	CgroupParent  string               `yaml:"cgroup_parent,omitempty"`
+	ContainerName string               `yaml:"container_name,omitempty"`
+	Devices       []string             `yaml:"devices,omitempty"`
+	DependsOn     []string             `yaml:"depends_on,omitempty"`
+	DNS           yaml.Stringorslice   `yaml:"dns,omitempty"`
+	DNSSearch     yaml.Stringorslice   `yaml:"dns_search,omitempty"`
+	DomainName    string               `yaml:"domainname,omitempty"`
+	Entrypoint    yaml.Command         `yaml:"entrypoint,flow,omitempty"`
+	EnvFile       yaml.Stringorslice   `yaml:"env_file,omitempty"`
+	Environment   yaml.MaporEqualSlice `yaml:"environment,omitempty"`
+	Expose        []string             `yaml:"expose,omitempty"`
+	Extends       yaml.MaporEqualSlice `yaml:"extends,omitempty"`
+	ExternalLinks []string             `yaml:"external_links,omitempty"`
+	ExtraHosts    []string             `yaml:"extra_hosts,omitempty"`
+	Image         string               `yaml:"image,omitempty"`
+	Hostname      string               `yaml:"hostname,omitempty"`
+	Ipc           string               `yaml:"ipc,omitempty"`
+	Labels        yaml.SliceorMap      `yaml:"labels,omitempty"`
+	Links         yaml.MaporColonSlice `yaml:"links,omitempty"`
+	Logging       Log                  `yaml:"logging,omitempty"`
+	MacAddress    string               `yaml:"mac_address,omitempty"`
+	MemLimit      int64                `yaml:"mem_limit,omitempty"`
+	MemSwapLimit  int64                `yaml:"memswap_limit,omitempty"`
+	NetworkMode   string               `yaml:"network_mode,omitempty"`
+	Networks      []string             `yaml:"networks,omitempty"`
+	Pid           string               `yaml:"pid,omitempty"`
+	Ports         []string             `yaml:"ports,omitempty"`
+	Privileged    bool                 `yaml:"privileged,omitempty"`
+	SecurityOpt   []string             `yaml:"security_opt,omitempty"`
+	StopSignal    string               `yaml:"stop_signal,omitempty"`
+	VolumeDriver  string               `yaml:"volume_driver,omitempty"`
+	Volumes       []string             `yaml:"volumes,omitempty"`
+	VolumesFrom   []string             `yaml:"volumes_from,omitempty"`
+	Uts           string               `yaml:"uts,omitempty"`
+	Restart       string               `yaml:"restart,omitempty"`
+	ReadOnly      bool                 `yaml:"read_only,omitempty"`
+	StdinOpen     bool                 `yaml:"stdin_open,omitempty"`
+	Tty           bool                 `yaml:"tty,omitempty"`
+	User          string               `yaml:"user,omitempty"`
+	WorkingDir    string               `yaml:"working_dir,omitempty"`
+	Ulimits       yaml.Ulimits         `yaml:"ulimits,omitempty"`
+}
+
+// VolumeConfig holds v2 volume configuration
+type VolumeConfig struct {
+	Driver     string            `yaml:"driver,omitempty"`
+	DriverOpts map[string]string `yaml:"driver_opts,omitempty"`
+	External   bool              `yaml:"external,omitempty"`
+}
+
+// Ipam holds v2 network IPAM information
+type Ipam struct {
+	Driver string   `yaml:"driver,omitempty"`
+	Config []string `yaml:"config,omitempty"`
+}
+
+// NetworkConfig holds v2 network configuration
+type NetworkConfig struct {
+	Driver     string            `yaml:"driver,omitempty"`
+	DriverOpts map[string]string `yaml:"driver_opts,omitempty"`
+	External   bool              `yaml:"external,omitempty"`
+	Ipam       Ipam              `yaml:"ipam,omitempty"`
+}
+
+// Config holds libcompose top level configuration
+type Config struct {
+	Version  string                   `yaml:"version,omitempty"`
+	Services RawServiceMap            `yaml:"services,omitempty"`
+	Volumes  map[string]*VolumeConfig `yaml:"volumes,omitempty"`
+	Networks 
map[string]*NetworkConfig `yaml:"networks,omitempty"`
+}
+
+// NewServiceConfigs initializes a new ServiceConfigs struct
+func NewServiceConfigs() *ServiceConfigs {
+	return &ServiceConfigs{
+		m: make(map[string]*ServiceConfig),
+	}
+}
+
+// ServiceConfigs holds a concurrent safe map of ServiceConfig
+type ServiceConfigs struct {
+	m  map[string]*ServiceConfig
+	mu sync.RWMutex
+}
+
+// Has checks if the config map has the specified name
+func (c *ServiceConfigs) Has(name string) bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	_, ok := c.m[name]
+	return ok
+}
+
+// Get returns the config and the presence of the specified name
+func (c *ServiceConfigs) Get(name string) (*ServiceConfig, bool) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	service, ok := c.m[name]
+	return service, ok
+}
+
+// Add adds the specified config with the specified name
+func (c *ServiceConfigs) Add(name string, service *ServiceConfig) {
+	c.mu.Lock()
+	c.m[name] = service
+	c.mu.Unlock()
+}
+
+// Len returns the number of configs
+func (c *ServiceConfigs) Len() int {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return len(c.m)
+}
+
+// Keys returns the names of the configs
+func (c *ServiceConfigs) Keys() []string {
+	keys := []string{}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	for name := range c.m {
+		keys = append(keys, name)
+	}
+	return keys
+}
+
+// RawService represents a service in unparsed map form
+type RawService map[string]interface{}
+
+// RawServiceMap is a collection of RawServices
+type RawServiceMap map[string]RawService
+
+// ParseOptions are a set of options to customize the parsing process
+type ParseOptions struct {
+	Interpolate bool
+	Validate    bool
+	Preprocess  func(RawServiceMap) (RawServiceMap, error)
+	Postprocess func(map[string]*ServiceConfig) (map[string]*ServiceConfig, error)
+}
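ServiceConfigs guards its map with an RWMutex, so reads and writes are safe from concurrent goroutines. A minimal, hypothetical illustration:

    package main

    import (
        "fmt"
        "sync"

        "github.com/docker/libcompose/config"
    )

    func main() {
        configs := config.NewServiceConfigs()

        var wg sync.WaitGroup
        for _, name := range []string{"web", "db", "cache"} {
            wg.Add(1)
            go func(name string) {
                defer wg.Done()
                // Concurrent Adds are serialized by the internal mutex.
                configs.Add(name, &config.ServiceConfig{Image: name})
            }(name)
        }
        wg.Wait()

        fmt.Println(configs.Len(), configs.Keys())
    }

diff --git a/vendor/github.com/docker/libcompose/config/utils.go b/vendor/github.com/docker/libcompose/config/utils.go
new file mode 100644
index 00000000..ae9b86cf
--- /dev/null
+++ b/vendor/github.com/docker/libcompose/config/utils.go
@@ -0,0 +1,42 @@
+package config
+
+func merge(existing, value interface{}) interface{} {
+	// append strings
+	if left, lok := existing.([]interface{}); lok {
+		if right, rok := value.([]interface{}); rok {
+			return append(left, right...)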
+ } + } + + //merge maps + if left, lok := existing.(map[interface{}]interface{}); lok { + if right, rok := value.(map[interface{}]interface{}); rok { + newLeft := make(map[interface{}]interface{}) + for k, v := range left { + newLeft[k] = v + } + for k, v := range right { + newLeft[k] = v + } + return newLeft + } + } + + return value +} + +func clone(in RawService) RawService { + result := RawService{} + for k, v := range in { + result[k] = v + } + + return result +} + +func asString(obj interface{}) string { + if v, ok := obj.(string); ok { + return v + } + return "" +} diff --git a/vendor/github.com/docker/libcompose/config/validation.go b/vendor/github.com/docker/libcompose/config/validation.go new file mode 100644 index 00000000..177a6495 --- /dev/null +++ b/vendor/github.com/docker/libcompose/config/validation.go @@ -0,0 +1,300 @@ +package config + +import ( + "fmt" + "strconv" + "strings" + + "github.com/xeipuuv/gojsonschema" +) + +func serviceNameFromErrorField(field string) string { + splitKeys := strings.Split(field, ".") + return splitKeys[0] +} + +func keyNameFromErrorField(field string) string { + splitKeys := strings.Split(field, ".") + + if len(splitKeys) > 0 { + return splitKeys[len(splitKeys)-1] + } + + return "" +} + +func containsTypeError(resultError gojsonschema.ResultError) bool { + contextSplit := strings.Split(resultError.Context().String(), ".") + _, err := strconv.Atoi(contextSplit[len(contextSplit)-1]) + return err == nil +} + +func addArticle(s string) string { + switch s[0] { + case 'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U': + return "an " + s + default: + return "a " + s + } +} + +// Gets the value in a service map at a given error context +func getValue(val interface{}, context string) string { + keys := strings.Split(context, ".") + + if keys[0] == "(root)" { + keys = keys[1:] + } + + for i, k := range keys { + switch typedVal := (val).(type) { + case string: + return typedVal + case []interface{}: + if index, err := strconv.Atoi(k); err == nil { + val = typedVal[index] + } + case RawServiceMap: + val = typedVal[k] + case RawService: + val = typedVal[k] + case map[interface{}]interface{}: + val = typedVal[k] + } + + if i == len(keys)-1 { + return fmt.Sprint(val) + } + } + + return "" +} + +// Converts map[interface{}]interface{} to map[string]interface{} recursively +// gojsonschema only accepts map[string]interface{} +func convertServiceMapKeysToStrings(serviceMap RawServiceMap) RawServiceMap { + newServiceMap := make(RawServiceMap) + + for k, v := range serviceMap { + newServiceMap[k] = convertServiceKeysToStrings(v) + } + + return newServiceMap +} + +func convertServiceKeysToStrings(service RawService) RawService { + newService := make(RawService) + + for k, v := range service { + newService[k] = convertKeysToStrings(v) + } + + return newService +} + +func convertKeysToStrings(item interface{}) interface{} { + switch typedDatas := item.(type) { + + case map[interface{}]interface{}: + newMap := make(map[string]interface{}) + + for key, value := range typedDatas { + stringKey := key.(string) + newMap[stringKey] = convertKeysToStrings(value) + } + return newMap + + case []interface{}: + // newArray := make([]interface{}, 0) will cause golint to complain + var newArray []interface{} + newArray = make([]interface{}, 0) + + for _, value := range typedDatas { + newArray = append(newArray, convertKeysToStrings(value)) + } + return newArray + + default: + return item + } +} + +var dockerConfigHints = map[string]string{ + "cpu_share": "cpu_shares", + 
"add_host": "extra_hosts", + "hosts": "extra_hosts", + "extra_host": "extra_hosts", + "device": "devices", + "link": "links", + "memory_swap": "memswap_limit", + "port": "ports", + "privilege": "privileged", + "priviliged": "privileged", + "privilige": "privileged", + "volume": "volumes", + "workdir": "working_dir", +} + +func unsupportedConfigMessage(key string, nextErr gojsonschema.ResultError) string { + service := serviceNameFromErrorField(nextErr.Field()) + + message := fmt.Sprintf("Unsupported config option for %s service: '%s'", service, key) + if val, ok := dockerConfigHints[key]; ok { + message += fmt.Sprintf(" (did you mean '%s'?)", val) + } + + return message +} + +func oneOfMessage(serviceMap RawServiceMap, schema map[string]interface{}, err, nextErr gojsonschema.ResultError) string { + switch nextErr.Type() { + case "additional_property_not_allowed": + property := nextErr.Details()["property"] + + return fmt.Sprintf("contains unsupported option: '%s'", property) + case "invalid_type": + if containsTypeError(nextErr) { + expectedType := addArticle(nextErr.Details()["expected"].(string)) + + return fmt.Sprintf("contains %s, which is an invalid type, it should be %s", getValue(serviceMap, nextErr.Context().String()), expectedType) + } + + validTypes := parseValidTypesFromSchema(schema, err.Context().String()) + + validTypesMsg := addArticle(strings.Join(validTypes, " or ")) + + return fmt.Sprintf("contains an invalid type, it should be %s", validTypesMsg) + case "unique": + contextWithDuplicates := getValue(serviceMap, nextErr.Context().String()) + + return fmt.Sprintf("contains non unique items, please remove duplicates from %s", contextWithDuplicates) + } + + return "" +} + +func invalidTypeMessage(service, key string, err gojsonschema.ResultError) string { + expectedTypesString := err.Details()["expected"].(string) + var expectedTypes []string + + if strings.Contains(expectedTypesString, ",") { + expectedTypes = strings.Split(expectedTypesString[1:len(expectedTypesString)-1], ",") + } else { + expectedTypes = []string{expectedTypesString} + } + + validTypesMsg := addArticle(strings.Join(expectedTypes, " or ")) + + return fmt.Sprintf("Service '%s' configuration key '%s' contains an invalid type, it should be %s.", service, key, validTypesMsg) +} + +func validate(serviceMap RawServiceMap) error { + if err := setupSchemaLoaders(); err != nil { + return err + } + + serviceMap = convertServiceMapKeysToStrings(serviceMap) + + var validationErrors []string + + dataLoader := gojsonschema.NewGoLoader(serviceMap) + + result, err := gojsonschema.Validate(schemaLoader, dataLoader) + if err != nil { + return err + } + + // gojsonschema can create extraneous "additional_property_not_allowed" errors in some cases + // If this is set, and the error is at root level, skip over that error + skipRootAdditionalPropertyError := false + + if !result.Valid() { + for i := 0; i < len(result.Errors()); i++ { + err := result.Errors()[i] + + if skipRootAdditionalPropertyError && err.Type() == "additional_property_not_allowed" && err.Context().String() == "(root)" { + skipRootAdditionalPropertyError = false + continue + } + + if err.Context().String() == "(root)" { + switch err.Type() { + case "additional_property_not_allowed": + validationErrors = append(validationErrors, fmt.Sprintf("Invalid service name '%s' - only [a-zA-Z0-9\\._\\-] characters are allowed", err.Field())) + default: + validationErrors = append(validationErrors, err.Description()) + } + } else { + skipRootAdditionalPropertyError = true 
+ + serviceName := serviceNameFromErrorField(err.Field()) + key := keyNameFromErrorField(err.Field()) + + switch err.Type() { + case "additional_property_not_allowed": + validationErrors = append(validationErrors, unsupportedConfigMessage(key, result.Errors()[i+1])) + case "number_one_of": + validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' configuration key '%s' %s", serviceName, key, oneOfMessage(serviceMap, schema, err, result.Errors()[i+1]))) + + // Next error handled in oneOfMessage, skip over it + i++ + case "invalid_type": + validationErrors = append(validationErrors, invalidTypeMessage(serviceName, key, err)) + case "required": + validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' option '%s' is invalid, %s", serviceName, key, err.Description())) + case "missing_dependency": + dependency := err.Details()["dependency"].(string) + validationErrors = append(validationErrors, fmt.Sprintf("Invalid configuration for '%s' service: dependency '%s' is not satisfied", serviceName, dependency)) + case "unique": + contextWithDuplicates := getValue(serviceMap, err.Context().String()) + validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' configuration key '%s' value %s has non-unique elements", serviceName, key, contextWithDuplicates)) + default: + validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' configuration key %s value %s", serviceName, key, err.Description())) + } + } + } + + return fmt.Errorf(strings.Join(validationErrors, "\n")) + } + + return nil +} + +func validateServiceConstraints(service RawService, serviceName string) error { + if err := setupSchemaLoaders(); err != nil { + return err + } + + service = convertServiceKeysToStrings(service) + + var validationErrors []string + + dataLoader := gojsonschema.NewGoLoader(service) + + result, err := gojsonschema.Validate(constraintSchemaLoader, dataLoader) + if err != nil { + return err + } + + if !result.Valid() { + for _, err := range result.Errors() { + if err.Type() == "number_any_of" { + _, containsImage := service["image"] + _, containsBuild := service["build"] + _, containsDockerfile := service["dockerfile"] + + if containsImage && containsBuild { + validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' has both an image and build path specified. A service can either be built to image or use an existing image, not both.", serviceName)) + } else if !containsImage && !containsBuild { + validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' has neither an image nor a build path specified. Exactly one must be provided.", serviceName)) + } else if containsImage && containsDockerfile { + validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' has both an image and alternate Dockerfile. 
A service can either be built to image or use an existing image, not both.", serviceName)) + } + } + } + + return fmt.Errorf(strings.Join(validationErrors, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/libcompose/docker/auth.go b/vendor/github.com/docker/libcompose/docker/auth.go new file mode 100644 index 00000000..f5023636 --- /dev/null +++ b/vendor/github.com/docker/libcompose/docker/auth.go @@ -0,0 +1,40 @@ +package docker + +import ( + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" +) + +// AuthLookup defines a method for looking up authentication information +type AuthLookup interface { + All() map[string]types.AuthConfig + Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig +} + +// ConfigAuthLookup implements AuthLookup by reading a Docker config file +type ConfigAuthLookup struct { + context *Context +} + +// NewConfigAuthLookup creates a new ConfigAuthLookup for a given context +func NewConfigAuthLookup(context *Context) *ConfigAuthLookup { + return &ConfigAuthLookup{ + context: context, + } +} + +// Lookup uses a Docker config file to lookup authentication information +func (c *ConfigAuthLookup) Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig { + if c.context.ConfigFile == nil || repoInfo == nil || repoInfo.Index == nil { + return types.AuthConfig{} + } + return registry.ResolveAuthConfig(c.context.ConfigFile.AuthConfigs, repoInfo.Index) +} + +// All uses a Docker config file to get all authentication information +func (c *ConfigAuthLookup) All() map[string]types.AuthConfig { + if c.context.ConfigFile == nil { + return map[string]types.AuthConfig{} + } + return c.context.ConfigFile.AuthConfigs +} diff --git a/vendor/github.com/docker/libcompose/docker/builder.go b/vendor/github.com/docker/libcompose/docker/builder/builder.go similarity index 55% rename from vendor/github.com/docker/libcompose/docker/builder.go rename to vendor/github.com/docker/libcompose/docker/builder/builder.go index 1fd81ebf..6b88a36b 100644 --- a/vendor/github.com/docker/libcompose/docker/builder.go +++ b/vendor/github.com/docker/libcompose/docker/builder/builder.go @@ -1,7 +1,6 @@ -package docker +package builder import ( - "encoding/json" "fmt" "io" "os" @@ -9,12 +8,19 @@ import ( "path/filepath" "strings" + "golang.org/x/net/context" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/utils" - "github.com/docker/libcompose/project" - dockerclient "github.com/fsouza/go-dockerclient" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" ) // DefaultDockerfileName is the default name of a Dockerfile @@ -23,91 +29,89 @@ const DefaultDockerfileName = "Dockerfile" // Builder defines methods to provide a docker builder. This makes libcompose // not tied up to the docker daemon builder. type Builder interface { - Build(p *project.Project, service project.Service) (string, error) + Build(imageName string) error } // DaemonBuilder is the daemon "docker build" Builder implementation. type DaemonBuilder struct { - context *Context -} - -// NewDaemonBuilder creates a DaemonBuilder based on the specified context. 
-func NewDaemonBuilder(context *Context) *DaemonBuilder { - return &DaemonBuilder{ - context: context, - } -} - -type builderWriter struct { - out io.Writer -} - -func (w *builderWriter) Write(bytes []byte) (int, error) { - data := map[string]interface{}{} - err := json.Unmarshal(bytes, &data) - if stream, ok := data["stream"]; err == nil && ok { - fmt.Fprint(w.out, stream) - } - return len(bytes), nil + Client client.APIClient + ContextDirectory string + Dockerfile string + AuthConfigs map[string]types.AuthConfig + NoCache bool + ForceRemove bool + Pull bool } // Build implements Builder. It consumes the docker build API endpoint and sends // a tar of the specified service build context. -func (d *DaemonBuilder) Build(p *project.Project, service project.Service) (string, error) { - if service.Config().Build == "" { - return service.Config().Image, nil - } - - tag := fmt.Sprintf("%s_%s", p.Name, service.Name()) - context, err := CreateTar(p, service.Name()) +func (d *DaemonBuilder) Build(ctx context.Context, imageName string) error { + buildCtx, err := createTar(d.ContextDirectory, d.Dockerfile) if err != nil { - return "", err + return err } + defer buildCtx.Close() - defer context.Close() + var progBuff io.Writer = os.Stdout + var buildBuff io.Writer = os.Stdout - client := d.context.ClientFactory.Create(service) + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) - logrus.Infof("Building %s...", tag) - err = client.BuildImage(dockerclient.BuildImageOptions{ - InputStream: context, - OutputStream: &builderWriter{out: os.Stderr}, - RawJSONStream: true, - Name: tag, - RmTmpContainer: true, - Dockerfile: service.Config().Dockerfile, + var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") + + logrus.Infof("Building %s...", imageName) + + outFd, isTerminalOut := term.GetFdInfo(os.Stdout) + + response, err := d.Client.ImageBuild(ctx, types.ImageBuildOptions{ + Context: body, + Tags: []string{imageName}, + NoCache: d.NoCache, + Remove: true, + ForceRemove: d.ForceRemove, + PullParent: d.Pull, + Dockerfile: d.Dockerfile, + AuthConfigs: d.AuthConfigs, }) if err != nil { - return "", err + return err } - return tag, nil + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, outFd, isTerminalOut, nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code) + } + } + return err } // CreateTar create a build context tar for the specified project and service name. 
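The rewritten DaemonBuilder is configured through exported fields instead of a *Context, and its Build method now takes a context.Context, which means it no longer satisfies the Builder interface declared just above in this snapshot (the interface still reads Build(imageName string) error). A usage sketch under that caveat; the client comes from the environment and the image tag is illustrative:

```go
package main

import (
	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/libcompose/docker/builder"
)

func main() {
	// Client configured from DOCKER_HOST and friends.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	b := &builder.DaemonBuilder{
		Client:           cli,
		ContextDirectory: ".",          // root of the build context
		Dockerfile:       "Dockerfile", // relative to the context
	}

	// "example/app:dev" is an illustrative tag.
	if err := b.Build(context.Background(), "example/app:dev"); err != nil {
		panic(err)
	}
}
```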
-func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { +func createTar(contextDirectory, dockerfile string) (io.ReadCloser, error) { // This code was ripped off from docker/api/client/build.go + dockerfileName := filepath.Join(contextDirectory, dockerfile) - serviceConfig := p.Configs[name] - root := serviceConfig.Build - dockerfileName := filepath.Join(root, serviceConfig.Dockerfile) - - absRoot, err := filepath.Abs(root) + absContextDirectory, err := filepath.Abs(contextDirectory) if err != nil { return nil, err } filename := dockerfileName - if dockerfileName == "" { + if dockerfile == "" { // No -f/--file was specified so use the default dockerfileName = DefaultDockerfileName - filename = filepath.Join(absRoot, dockerfileName) + filename = filepath.Join(absContextDirectory, dockerfileName) // Just to be nice ;-) look for 'dockerfile' too but only // use it if we found it, otherwise ignore this check if _, err = os.Lstat(filename); os.IsNotExist(err) { - tmpFN := path.Join(absRoot, strings.ToLower(dockerfileName)) + tmpFN := path.Join(absContextDirectory, strings.ToLower(dockerfileName)) if _, err = os.Lstat(tmpFN); err == nil { dockerfileName = strings.ToLower(dockerfileName) filename = tmpFN @@ -121,7 +125,7 @@ func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { } // Now reset the dockerfileName to be relative to the build context - dockerfileName, err = filepath.Rel(absRoot, filename) + dockerfileName, err = filepath.Rel(absContextDirectory, filename) if err != nil { return nil, err } @@ -138,7 +142,7 @@ func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { var includes = []string{"."} var excludes []string - dockerIgnorePath := path.Join(root, ".dockerignore") + dockerIgnorePath := path.Join(contextDirectory, ".dockerignore") dockerIgnore, err := os.Open(dockerIgnorePath) if err != nil { if !os.IsNotExist(err) { @@ -147,7 +151,7 @@ func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { logrus.Warnf("Error while reading .dockerignore (%s) : %s", dockerIgnorePath, err.Error()) excludes = make([]string, 0) } else { - excludes, err = utils.ReadDockerIgnore(dockerIgnore) + excludes, err = dockerignore.ReadAll(dockerIgnore) if err != nil { return nil, err } @@ -165,7 +169,7 @@ func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { includes = append(includes, ".dockerignore", dockerfileName) } - if err := utils.ValidateContextDirectory(root, excludes); err != nil { + if err := builder.ValidateContextDirectory(contextDirectory, excludes); err != nil { return nil, fmt.Errorf("Error checking context is accessible: '%s'. 
Please check permissions and try again.", err) } @@ -175,5 +179,5 @@ func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { IncludeFiles: includes, } - return archive.TarWithOptions(root, options) + return archive.TarWithOptions(contextDirectory, options) } diff --git a/vendor/github.com/docker/libcompose/docker/client.go b/vendor/github.com/docker/libcompose/docker/client/client.go similarity index 53% rename from vendor/github.com/docker/libcompose/docker/client.go rename to vendor/github.com/docker/libcompose/docker/client/client.go index ddda5941..3cd57ca9 100644 --- a/vendor/github.com/docker/libcompose/docker/client.go +++ b/vendor/github.com/docker/libcompose/docker/client/client.go @@ -1,21 +1,23 @@ -package docker +package client import ( "fmt" + "net/http" "os" "path/filepath" "runtime" "github.com/docker/docker/cliconfig" - "github.com/docker/docker/opts" "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/tlsconfig" - dockerclient "github.com/fsouza/go-dockerclient" + "github.com/docker/engine-api/client" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libcompose/version" ) const ( // DefaultAPIVersion is the default docker API version set by libcompose - DefaultAPIVersion = "1.20" + DefaultAPIVersion = "v1.20" defaultTrustKeyFile = "key.json" defaultCaFile = "ca.pem" defaultKeyFile = "key.pem" @@ -32,8 +34,8 @@ func init() { } } -// ClientOpts holds docker client options (host, tls, ..) -type ClientOpts struct { +// Options holds docker client options (host, tls, ..) +type Options struct { TLS bool TLSVerify bool TLSOptions tlsconfig.Options @@ -42,8 +44,24 @@ type ClientOpts struct { APIVersion string } -// CreateClient creates a docker client based on the specified options. -func CreateClient(c ClientOpts) (*dockerclient.Client, error) { +// Create creates a docker client based on the specified options. 
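createTar now delegates .dockerignore parsing to dockerignore.ReadAll from docker/docker instead of the removed utils.ReadDockerIgnore. A sketch of that call in isolation, assuming the signature used by this vendored tree; the file path is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/builder/dockerignore"
)

func main() {
	f, err := os.Open(".dockerignore") // illustrative path
	if err != nil {
		if os.IsNotExist(err) {
			return // no ignore file means no excludes
		}
		panic(err)
	}
	defer f.Close()

	// Returns one exclusion pattern per line, blanks and comments removed.
	excludes, err := dockerignore.ReadAll(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(excludes)
}
```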
+func Create(c Options) (client.APIClient, error) { + if c.Host == "" { + if os.Getenv("DOCKER_API_VERSION") == "" { + os.Setenv("DOCKER_API_VERSION", DefaultAPIVersion) + } + client, err := client.NewEnvClient() + if err != nil { + return nil, err + } + return client, nil + } + + apiVersion := c.APIVersion + if apiVersion == "" { + apiVersion = DefaultAPIVersion + } + if c.TLSOptions.CAFile == "" { c.TLSOptions.CAFile = filepath.Join(dockerCertPath, defaultCaFile) } @@ -53,50 +71,45 @@ func CreateClient(c ClientOpts) (*dockerclient.Client, error) { if c.TLSOptions.KeyFile == "" { c.TLSOptions.KeyFile = filepath.Join(dockerCertPath, defaultKeyFile) } - - if c.Host == "" { - defaultHost := os.Getenv("DOCKER_HOST") - if defaultHost == "" { - if runtime.GOOS != "windows" { - // If we do not have a host, default to unix socket - defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket) - } else { - // If we do not have a host, default to TCP socket on Windows - defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort) - } - } - defaultHost, err := opts.ValidateHost(defaultHost) - if err != nil { - return nil, err - } - c.Host = defaultHost - } - if c.TrustKey == "" { c.TrustKey = filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile) } - if c.TLSVerify { c.TLS = true } - if c.TLS { c.TLSOptions.InsecureSkipVerify = !c.TLSVerify } - apiVersion := c.APIVersion - if apiVersion == "" { - apiVersion = DefaultAPIVersion - } + var httpClient *http.Client if c.TLS { - client, err := dockerclient.NewVersionedTLSClient(c.Host, c.TLSOptions.CertFile, c.TLSOptions.KeyFile, c.TLSOptions.CAFile, apiVersion) + config, err := tlsconfig.Client(c.TLSOptions) if err != nil { return nil, err } - if c.TLSOptions.InsecureSkipVerify { - client.TLSConfig.InsecureSkipVerify = true + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(c.Host) + if err != nil { + return nil, err + } + + if err := sockets.ConfigureTransport(tr, proto, addr); err != nil { + return nil, err + } + + httpClient = &http.Client{ + Transport: tr, } - return client, nil } - return dockerclient.NewVersionedClient(c.Host, apiVersion) + + customHeaders := map[string]string{} + customHeaders["User-Agent"] = fmt.Sprintf("Libcompose-Client/%s (%s)", version.VERSION, runtime.GOOS) + + client, err := client.NewClient(c.Host, apiVersion, httpClient, customHeaders) + if err != nil { + return nil, err + } + return client, nil } diff --git a/vendor/github.com/docker/libcompose/docker/container.go b/vendor/github.com/docker/libcompose/docker/container.go index 00f56be0..984322a4 100644 --- a/vendor/github.com/docker/libcompose/docker/container.go +++ b/vendor/github.com/docker/libcompose/docker/container.go @@ -2,78 +2,102 @@ package docker import ( "fmt" + "io" "math" "os" "strings" + "golang.org/x/net/context" + "github.com/Sirupsen/logrus" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" + "github.com/docker/libcompose/config" + "github.com/docker/libcompose/labels" "github.com/docker/libcompose/logger" "github.com/docker/libcompose/project" + "github.com/docker/libcompose/project/events" 
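client.Create above has two paths: with an empty Host it defers to engine-api's NewEnvClient (pinning DOCKER_API_VERSION to the default when unset), otherwise it assembles its own http.Client, with TLS when requested. A sketch of both, assuming the Options fields shown in the hunk; the host and certificate paths are illustrative:

```go
package main

import (
	"github.com/docker/go-connections/tlsconfig"
	"github.com/docker/libcompose/docker/client"
)

func main() {
	// Path 1: empty Host, everything comes from the environment.
	envCli, err := client.Create(client.Options{})
	if err != nil {
		panic(err)
	}
	_ = envCli

	// Path 2: explicit host with TLS verification (host and paths illustrative).
	tlsCli, err := client.Create(client.Options{
		Host:      "tcp://192.0.2.10:2376",
		TLSVerify: true, // Create sets TLS=true when TLSVerify is set
		TLSOptions: tlsconfig.Options{
			CAFile:   "/certs/ca.pem",
			CertFile: "/certs/cert.pem",
			KeyFile:  "/certs/key.pem",
		},
	})
	if err != nil {
		panic(err)
	}
	_ = tlsCli
}
```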
util "github.com/docker/libcompose/utils" - dockerclient "github.com/fsouza/go-dockerclient" ) -// DefaultTag is the name of the default tag of an image. -const DefaultTag = "latest" - // Container holds information about a docker container and the service it is tied on. -// It implements Service interface by encapsulating a EmptyService. type Container struct { - project.EmptyService + name string + serviceName string + projectName string + containerNumber int + oneOff bool + eventNotifier events.Notifier + loggerFactory logger.Factory + client client.APIClient - name string + // FIXME(vdemeester) Remove this dependency service *Service - client *dockerclient.Client } // NewContainer creates a container struct with the specified docker client, name and service. -func NewContainer(client *dockerclient.Client, name string, service *Service) *Container { +func NewContainer(client client.APIClient, name string, containerNumber int, service *Service) *Container { return &Container{ - client: client, - name: name, + client: client, + name: name, + containerNumber: containerNumber, + + // TODO(vdemeester) Move these to arguments + serviceName: service.name, + projectName: service.context.Project.Name, + eventNotifier: service.context.Project, + loggerFactory: service.context.LoggerFactory, + + // TODO(vdemeester) Remove this dependency service: service, } } -func (c *Container) findExisting() (*dockerclient.APIContainers, error) { - return GetContainerByName(c.client, c.name) +// NewOneOffContainer creates a "oneoff" container struct with the specified docker client, name and service. +func NewOneOffContainer(client client.APIClient, name string, containerNumber int, service *Service) *Container { + c := NewContainer(client, name, containerNumber, service) + c.oneOff = true + return c } -func (c *Container) findInfo() (*dockerclient.Container, error) { - container, err := c.findExisting() - if err != nil { - return nil, err - } - - return c.client.InspectContainer(container.ID) +func (c *Container) findExisting(ctx context.Context) (*types.ContainerJSON, error) { + return GetContainer(ctx, c.client, c.name) } // Info returns info about the container, like name, command, state or ports. 
-func (c *Container) Info(qFlag bool) (project.Info, error) { - container, err := c.findExisting() - if err != nil { +func (c *Container) Info(ctx context.Context, qFlag bool) (project.Info, error) { + container, err := c.findExisting(ctx) + if err != nil || container == nil { return nil, err } - result := project.Info{} + infos, err := GetContainersByFilter(ctx, c.client, map[string][]string{ + "name": {container.Name}, + }) + if err != nil || len(infos) == 0 { + return nil, err + } + info := infos[0] + result := project.Info{} if qFlag { result = append(result, project.InfoPart{Key: "Id", Value: container.ID}) } else { - result = append(result, project.InfoPart{Key: "Name", Value: name(container.Names)}) - result = append(result, project.InfoPart{Key: "Command", Value: container.Command}) - result = append(result, project.InfoPart{Key: "State", Value: container.Status}) - result = append(result, project.InfoPart{Key: "Ports", Value: portString(container.Ports)}) + result = append(result, project.InfoPart{Key: "Name", Value: name(info.Names)}) + result = append(result, project.InfoPart{Key: "Command", Value: info.Command}) + result = append(result, project.InfoPart{Key: "State", Value: info.Status}) + result = append(result, project.InfoPart{Key: "Ports", Value: portString(info.Ports)}) } return result, nil } -func portString(ports []dockerclient.APIPort) string { +func portString(ports []types.Port) string { result := []string{} for _, port := range ports { @@ -103,40 +127,45 @@ func name(names []string) string { // Recreate will not refresh the container by means of relaxation and enjoyment, // just delete it and create a new one with the current configuration -func (c *Container) Recreate(imageName string) (*dockerclient.APIContainers, error) { - info, err := c.findInfo() - if err != nil { +func (c *Container) Recreate(ctx context.Context, imageName string) (*types.ContainerJSON, error) { + container, err := c.findExisting(ctx) + if err != nil || container == nil { return nil, err - } else if info == nil { - return nil, fmt.Errorf("Can not find container to recreate for service: %s", c.service.Name()) } - hash := info.Config.Labels[HASH.Str()] + hash := container.Config.Labels[labels.HASH.Str()] + legacyHash := container.Config.Labels[labels.HASH_LEGACY.Str()] + + if hash == "" && legacyHash != "" { + hash = legacyHash + } if hash == "" { - return nil, fmt.Errorf("Failed to find hash on old container: %s", info.Name) + return nil, fmt.Errorf("Failed to find hash on old container: %s", container.Name) } - name := info.Name[1:] - newName := fmt.Sprintf("%s_%s", name, info.ID[:12]) + name := container.Name[1:] + newName := fmt.Sprintf("%s_%s", name, container.ID[:12]) logrus.Debugf("Renaming %s => %s", name, newName) - if err := c.client.RenameContainer(dockerclient.RenameContainerOptions{ID: info.ID, Name: newName}); err != nil { + if err := c.client.ContainerRename(ctx, container.ID, newName); err != nil { logrus.Errorf("Failed to rename old container %s", c.name) return nil, err } - newContainer, err := c.createContainer(imageName, info.ID) + newContainer, err := c.createContainer(ctx, imageName, container.ID, nil) if err != nil { return nil, err } logrus.Debugf("Created replacement container %s", newContainer.ID) - if err := c.client.RemoveContainer( - dockerclient.RemoveContainerOptions{ID: info.ID, Force: true, RemoveVolumes: false}); err != nil { - + if err := c.client.ContainerRemove(ctx, types.ContainerRemoveOptions{ + ContainerID: container.ID, + Force: true, + RemoveVolumes: 
false, + }); err != nil { logrus.Errorf("Failed to remove old container %s", c.name) return nil, err } - logrus.Debugf("Removed old container %s %s", c.name, info.ID) + logrus.Debugf("Removed old container %s %s", c.name, container.ID) return newContainer, nil } @@ -144,18 +173,25 @@ func (c *Container) Recreate(imageName string) (*dockerclient.APIContainers, err // Create creates the container based on the specified image name and send an event // to notify the container has been created. If the container already exists, does // nothing. -func (c *Container) Create(imageName string) (*dockerclient.APIContainers, error) { - container, err := c.findExisting() +func (c *Container) Create(ctx context.Context, imageName string) (*types.ContainerJSON, error) { + return c.CreateWithOverride(ctx, imageName, nil) +} + +// CreateWithOverride create container and override parts of the config to +// allow special situations to override the config generated from the compose +// file +func (c *Container) CreateWithOverride(ctx context.Context, imageName string, configOverride *config.ServiceConfig) (*types.ContainerJSON, error) { + container, err := c.findExisting(ctx) if err != nil { return nil, err } if container == nil { - container, err = c.createContainer(imageName, "") + container, err = c.createContainer(ctx, imageName, "", configOverride) if err != nil { return nil, err } - c.service.context.Project.Notify(project.EventContainerCreated, c.service.Name(), map[string]string{ + c.eventNotifier.Notify(events.ContainerCreated, c.serviceName, map[string]string{ "name": c.Name(), }) } @@ -163,115 +199,261 @@ func (c *Container) Create(imageName string) (*dockerclient.APIContainers, error return container, err } -// Down stops the container. -func (c *Container) Down() error { - return c.withContainer(func(container *dockerclient.APIContainers) error { - return c.client.StopContainer(container.ID, c.service.context.Timeout) +// Stop stops the container. +func (c *Container) Stop(ctx context.Context, timeout int) error { + return c.withContainer(ctx, func(container *types.ContainerJSON) error { + return c.client.ContainerStop(ctx, container.ID, timeout) + }) +} + +// Pause pauses the container. If the containers are already paused, don't fail. +func (c *Container) Pause(ctx context.Context) error { + return c.withContainer(ctx, func(container *types.ContainerJSON) error { + if !container.State.Paused { + return c.client.ContainerPause(ctx, container.ID) + } + return nil + }) +} + +// Unpause unpauses the container. If the containers are not paused, don't fail. +func (c *Container) Unpause(ctx context.Context) error { + return c.withContainer(ctx, func(container *types.ContainerJSON) error { + if container.State.Paused { + return c.client.ContainerUnpause(ctx, container.ID) + } + return nil }) } // Kill kill the container. -func (c *Container) Kill() error { - return c.withContainer(func(container *dockerclient.APIContainers) error { - return c.client.KillContainer(dockerclient.KillContainerOptions{ID: container.ID, Signal: dockerclient.Signal(c.service.context.Signal)}) +func (c *Container) Kill(ctx context.Context, signal string) error { + return c.withContainer(ctx, func(container *types.ContainerJSON) error { + return c.client.ContainerKill(ctx, container.ID, signal) }) } // Delete removes the container if existing. If the container is running, it tries // to stop it first. 
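Pause and Unpause above inspect the container first so repeated calls succeed instead of erroring. The same guard pattern written directly against the engine-api client; pauseIfRunning is a hypothetical helper:

```go
package example

import (
	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
)

// pauseIfRunning mirrors the guard in Container.Pause: inspect first,
// and only pause when the container is not already paused.
func pauseIfRunning(ctx context.Context, cli client.APIClient, id string) error {
	info, err := cli.ContainerInspect(ctx, id)
	if err != nil {
		return err
	}
	if info.State.Paused {
		return nil // already paused, succeed silently
	}
	return cli.ContainerPause(ctx, id)
}
```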
-func (c *Container) Delete() error { - container, err := c.findExisting() +func (c *Container) Delete(ctx context.Context, removeVolume bool) error { + container, err := c.findExisting(ctx) if err != nil || container == nil { return err } - info, err := c.client.InspectContainer(container.ID) - if err != nil { - return err - } - - if info.State.Running { - err := c.client.StopContainer(container.ID, c.service.context.Timeout) - if err != nil { - return err - } - } - - return c.client.RemoveContainer(dockerclient.RemoveContainerOptions{ID: container.ID, Force: true, RemoveVolumes: c.service.context.Volume}) -} - -// Up creates and start the container based on the image name and send an event -// to notify the container has been created. If the container exists but is stopped -// it tries to start it. -func (c *Container) Up(imageName string) error { - var err error - - defer func() { - if err == nil && c.service.context.Log { - go c.Log() - } - }() - - container, err := c.Create(imageName) - if err != nil { - return err - } - - info, err := c.client.InspectContainer(container.ID) + info, err := c.client.ContainerInspect(ctx, container.ID) if err != nil { return err } if !info.State.Running { - logrus.WithFields(logrus.Fields{"container.ID": container.ID, "c.name": c.name}).Debug("Starting container") - if err = c.client.StartContainer(container.ID, nil); err != nil { - logrus.WithFields(logrus.Fields{"container.ID": container.ID, "c.name": c.name}).Debug("Failed to start container") - return err - } - - c.service.context.Project.Notify(project.EventContainerStarted, c.service.Name(), map[string]string{ - "name": c.Name(), + return c.client.ContainerRemove(ctx, types.ContainerRemoveOptions{ + ContainerID: container.ID, + Force: true, + RemoveVolumes: removeVolume, }) } return nil } +// IsRunning returns the running state of the container. +func (c *Container) IsRunning(ctx context.Context) (bool, error) { + container, err := c.findExisting(ctx) + if err != nil || container == nil { + return false, err + } + + info, err := c.client.ContainerInspect(ctx, container.ID) + if err != nil { + return false, err + } + + return info.State.Running, nil +} + +// Run creates, start and attach to the container based on the image name, +// the specified configuration. +// It will always create a new container. 
+func (c *Container) Run(ctx context.Context, imageName string, configOverride *config.ServiceConfig) (int, error) { + var ( + errCh chan error + out, stderr io.Writer + in io.ReadCloser + ) + + container, err := c.createContainer(ctx, imageName, "", configOverride) + if err != nil { + return -1, err + } + + if configOverride.StdinOpen { + in = os.Stdin + } + if configOverride.Tty { + out = os.Stdout + } + if configOverride.Tty { + stderr = os.Stderr + } + + options := types.ContainerAttachOptions{ + ContainerID: container.ID, + Stream: true, + Stdin: configOverride.StdinOpen, + Stdout: configOverride.Tty, + Stderr: configOverride.Tty, + } + + resp, err := c.client.ContainerAttach(ctx, options) + if err != nil { + return -1, err + } + + // set raw terminal + inFd, _ := term.GetFdInfo(in) + state, err := term.SetRawTerminal(inFd) + if err != nil { + return -1, err + } + // restore raw terminal + defer term.RestoreTerminal(inFd, state) + // holdHijackedConnection (in goroutine) + errCh = promise.Go(func() error { + return holdHijackedConnection(configOverride.Tty, in, out, stderr, resp) + }) + + if err := c.client.ContainerStart(ctx, container.ID); err != nil { + return -1, err + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return -1, err + } + + exitedContainer, err := c.client.ContainerInspect(ctx, container.ID) + if err != nil { + return -1, err + } + + return exitedContainer.State.ExitCode, nil +} + +func holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { + var err error + receiveStdout := make(chan error, 1) + if outputStream != nil || errorStream != nil { + go func() { + // When TTY is ON, use regular copy + if tty && outputStream != nil { + _, err = io.Copy(outputStream, resp.Reader) + } else { + _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) + } + logrus.Debugf("[hijack] End of stdout") + receiveStdout <- err + }() + } + + stdinDone := make(chan struct{}) + go func() { + if inputStream != nil { + io.Copy(resp.Conn, inputStream) + logrus.Debugf("[hijack] End of stdin") + } + + if err := resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + close(stdinDone) + }() + + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-stdinDone: + if outputStream != nil || errorStream != nil { + if err := <-receiveStdout; err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + } + } + + return nil +} + +// Up creates and start the container based on the image name and send an event +// to notify the container has been created. If the container exists but is stopped +// it tries to start it. 
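Run above attaches before starting the container so no output is lost, pumps the hijacked connection in a goroutine, and finally reads the exit code from one more inspect. A condensed sketch of that ordering for a non-TTY container; runAndWait is a hypothetical helper and input handling is trimmed:

```go
package example

import (
	"os"

	"golang.org/x/net/context"

	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

// runAndWait condenses Run's ordering: attach, then start, then wait
// for the stream to end, then inspect for the exit code.
func runAndWait(ctx context.Context, cli client.APIClient, id string) (int, error) {
	resp, err := cli.ContainerAttach(ctx, types.ContainerAttachOptions{
		ContainerID: id,
		Stream:      true,
		Stdout:      true,
		Stderr:      true,
	})
	if err != nil {
		return -1, err
	}
	defer resp.Close()

	done := make(chan error, 1)
	go func() {
		// Non-TTY output is multiplexed; demux it into stdout/stderr.
		_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
		done <- err
	}()

	if err := cli.ContainerStart(ctx, id); err != nil {
		return -1, err
	}
	if err := <-done; err != nil { // stream closes when the container exits
		return -1, err
	}

	info, err := cli.ContainerInspect(ctx, id)
	if err != nil {
		return -1, err
	}
	return info.State.ExitCode, nil
}
```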
+func (c *Container) Up(ctx context.Context, imageName string) error { + var err error + + container, err := c.Create(ctx, imageName) + if err != nil { + return err + } + + if !container.State.Running { + c.Start(container) + } + + return nil +} + +// Start the specified container with the specified host config +func (c *Container) Start(container *types.ContainerJSON) error { + logrus.WithFields(logrus.Fields{"container.ID": container.ID, "c.name": c.name}).Debug("Starting container") + if err := c.client.ContainerStart(context.Background(), container.ID); err != nil { + logrus.WithFields(logrus.Fields{"container.ID": container.ID, "c.name": c.name}).Debug("Failed to start container") + return err + } + c.eventNotifier.Notify(events.ContainerStarted, c.serviceName, map[string]string{ + "name": c.Name(), + }) + return nil +} + // OutOfSync checks if the container is out of sync with the service definition. // It looks if the the service hash container label is the same as the computed one. -func (c *Container) OutOfSync(imageName string) (bool, error) { - info, err := c.findInfo() - if err != nil || info == nil { +func (c *Container) OutOfSync(ctx context.Context, imageName string) (bool, error) { + container, err := c.findExisting(ctx) + if err != nil || container == nil { return false, err } - if info.Config.Image != imageName { - logrus.Debugf("Images for %s do not match %s!=%s", c.name, info.Config.Image, imageName) + if container.Config.Image != imageName { + logrus.Debugf("Images for %s do not match %s!=%s", c.name, container.Config.Image, imageName) return true, nil } - if info.Config.Labels[HASH.Str()] != c.getHash() { - logrus.Debugf("Hashes for %s do not match %s!=%s", c.name, info.Config.Labels[HASH.Str()], c.getHash()) + if container.Config.Labels[labels.HASH.Str()] != c.getHash() { + logrus.Debugf("Hashes for %s do not match %s!=%s", c.name, container.Config.Labels[labels.HASH.Str()], c.getHash()) return true, nil } - image, err := c.client.InspectImage(info.Config.Image) - if err != nil && (err.Error() == "Not found" || image == nil) { - logrus.Debugf("Image %s do not exist, do not know if it's out of sync", info.Config.Image) - return false, nil - } else if err != nil { + image, _, err := c.client.ImageInspectWithRaw(ctx, container.Config.Image, false) + if err != nil { + if client.IsErrImageNotFound(err) { + logrus.Debugf("Image %s do not exist, do not know if it's out of sync", container.Config.Image) + return false, nil + } return false, err } - logrus.Debugf("Checking existing image name vs id: %s == %s", image.ID, info.Image) - return image.ID != info.Image, err + logrus.Debugf("Checking existing image name vs id: %s == %s", image.ID, container.Image) + return image.ID != container.Image, err } func (c *Container) getHash() string { - return project.GetServiceHash(c.service.Name(), c.service.Config()) + return config.GetServiceHash(c.serviceName, c.service.Config()) } -func volumeBinds(volumes map[string]struct{}, container *dockerclient.Container) []string { +func volumeBinds(volumes map[string]struct{}, container *types.ContainerJSON) []string { result := make([]string, 0, len(container.Mounts)) for _, mount := range container.Mounts { if _, ok := volumes[mount.Destination]; ok { @@ -281,62 +463,65 @@ func volumeBinds(volumes map[string]struct{}, container *dockerclient.Container) return result } -func (c *Container) createContainer(imageName, oldContainer string) (*dockerclient.APIContainers, error) { - createOpts, err := ConvertToAPI(c.service.serviceConfig, c.name) 
+func (c *Container) createContainer(ctx context.Context, imageName, oldContainer string, configOverride *config.ServiceConfig) (*types.ContainerJSON, error) { + serviceConfig := c.service.serviceConfig + if configOverride != nil { + serviceConfig.Command = configOverride.Command + serviceConfig.Tty = configOverride.Tty + serviceConfig.StdinOpen = configOverride.StdinOpen + } + configWrapper, err := ConvertToAPI(c.service) if err != nil { return nil, err } - createOpts.Config.Image = imageName + configWrapper.Config.Image = imageName - if createOpts.Config.Labels == nil { - createOpts.Config.Labels = map[string]string{} + if configWrapper.Config.Labels == nil { + configWrapper.Config.Labels = map[string]string{} } - createOpts.Config.Labels[NAME.Str()] = c.name - createOpts.Config.Labels[SERVICE.Str()] = c.service.name - createOpts.Config.Labels[PROJECT.Str()] = c.service.context.Project.Name - createOpts.Config.Labels[HASH.Str()] = c.getHash() + oneOffString := "False" + if c.oneOff { + oneOffString = "True" + } - err = c.populateAdditionalHostConfig(createOpts.HostConfig) + configWrapper.Config.Labels[labels.SERVICE.Str()] = c.serviceName + configWrapper.Config.Labels[labels.PROJECT.Str()] = c.projectName + configWrapper.Config.Labels[labels.HASH.Str()] = c.getHash() + configWrapper.Config.Labels[labels.ONEOFF.Str()] = oneOffString + configWrapper.Config.Labels[labels.NUMBER.Str()] = fmt.Sprint(c.containerNumber) + configWrapper.Config.Labels[labels.VERSION.Str()] = ComposeVersion + + err = c.populateAdditionalHostConfig(configWrapper.HostConfig) if err != nil { return nil, err } if oldContainer != "" { - info, err := c.client.InspectContainer(oldContainer) + info, err := c.client.ContainerInspect(ctx, oldContainer) if err != nil { return nil, err } - createOpts.HostConfig.Binds = util.Merge(createOpts.HostConfig.Binds, volumeBinds(createOpts.Config.Volumes, info)) + configWrapper.HostConfig.Binds = util.Merge(configWrapper.HostConfig.Binds, volumeBinds(configWrapper.Config.Volumes, &info)) } - logrus.Debugf("Creating container %s %#v", c.name, createOpts) - - container, err := c.client.CreateContainer(*createOpts) - if err != nil && err == dockerclient.ErrNoSuchImage { - logrus.Debugf("Not Found, pulling image %s", createOpts.Config.Image) - if err = c.pull(createOpts.Config.Image); err != nil { - return nil, err - } - if container, err = c.client.CreateContainer(*createOpts); err != nil { - return nil, err - } - } + logrus.Debugf("Creating container %s %#v", c.name, configWrapper) + container, err := c.client.ContainerCreate(ctx, configWrapper.Config, configWrapper.HostConfig, configWrapper.NetworkingConfig, c.name) if err != nil { logrus.Debugf("Failed to create container %s: %v", c.name, err) return nil, err } - return GetContainerByID(c.client, container.ID) + return GetContainer(ctx, c.client, container.ID) } -func (c *Container) populateAdditionalHostConfig(hostConfig *dockerclient.HostConfig) error { +func (c *Container) populateAdditionalHostConfig(hostConfig *container.HostConfig) error { links := map[string]string{} for _, link := range c.service.DependentServices() { - if _, ok := c.service.context.Project.Configs[link.Target]; !ok { + if !c.service.context.Project.ServiceConfigs.Has(link.Target) { continue } @@ -345,7 +530,8 @@ func (c *Container) populateAdditionalHostConfig(hostConfig *dockerclient.HostCo return err } - containers, err := service.Containers() + // FIXME(vdemeester) container should not know service + containers, err := 
service.Containers(context.Background()) if err != nil { return err } @@ -384,7 +570,7 @@ func (c *Container) addLinks(links map[string]string, service project.Service, r } } -func (c *Container) addIpc(config *dockerclient.HostConfig, service project.Service, containers []project.Container) (*dockerclient.HostConfig, error) { +func (c *Container) addIpc(config *container.HostConfig, service project.Service, containers []project.Container) (*container.HostConfig, error) { if len(containers) == 0 { return nil, fmt.Errorf("Failed to find container for IPC %v", c.service.Config().Ipc) } @@ -394,13 +580,13 @@ func (c *Container) addIpc(config *dockerclient.HostConfig, service project.Serv return nil, err } - config.IpcMode = "container:" + id + config.IpcMode = container.IpcMode("container:" + id) return config, nil } -func (c *Container) addNetNs(config *dockerclient.HostConfig, service project.Service, containers []project.Container) (*dockerclient.HostConfig, error) { +func (c *Container) addNetNs(config *container.HostConfig, service project.Service, containers []project.Container) (*container.HostConfig, error) { if len(containers) == 0 { - return nil, fmt.Errorf("Failed to find container for networks ns %v", c.service.Config().Net) + return nil, fmt.Errorf("Failed to find container for networks ns %v", c.service.Config().NetworkMode) } id, err := containers[0].ID() @@ -408,13 +594,14 @@ func (c *Container) addNetNs(config *dockerclient.HostConfig, service project.Se return nil, err } - config.NetworkMode = "container:" + id + config.NetworkMode = container.NetworkMode("container:" + id) return config, nil } // ID returns the container Id. func (c *Container) ID() (string, error) { - container, err := c.findExisting() + // FIXME(vdemeester) container should not ask for his ID.. + container, err := c.findExisting(context.Background()) if container == nil { return "", err } @@ -426,91 +613,57 @@ func (c *Container) Name() string { return c.name } -// Pull pulls the image the container is based on. -func (c *Container) Pull() error { - return c.pull(c.service.serviceConfig.Image) -} - // Restart restarts the container if existing, does nothing otherwise. -func (c *Container) Restart() error { - container, err := c.findExisting() +func (c *Container) Restart(ctx context.Context, timeout int) error { + container, err := c.findExisting(ctx) if err != nil || container == nil { return err } - return c.client.RestartContainer(container.ID, c.service.context.Timeout) + return c.client.ContainerRestart(ctx, container.ID, timeout) } // Log forwards container logs to the project configured logger. 
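addIpc and addNetNs now wrap the "container:<id>" join strings in engine-api's typed HostConfig fields rather than assigning bare strings. A minimal sketch; the container ID is illustrative and the IsContainer helper is assumed from engine-api's container package:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/container"
)

func main() {
	id := "4f66ad9067ab" // illustrative container ID

	hostConfig := &container.HostConfig{
		// Join the IPC and network namespaces of an existing container.
		IpcMode:     container.IpcMode("container:" + id),
		NetworkMode: container.NetworkMode("container:" + id),
	}

	// The typed wrappers carry mode helpers such as IsContainer.
	fmt.Println(hostConfig.NetworkMode.IsContainer()) // true
}
```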
-func (c *Container) Log() error { - container, err := c.findExisting() +func (c *Container) Log(ctx context.Context, follow bool) error { + container, err := c.findExisting(ctx) if container == nil || err != nil { return err } - info, err := c.client.InspectContainer(container.ID) - if info == nil || err != nil { + info, err := c.client.ContainerInspect(ctx, container.ID) + if err != nil { return err } - l := c.service.context.LoggerFactory.Create(c.name) + // FIXME(vdemeester) update container struct to do less API calls + name := fmt.Sprintf("%s_%d", c.service.name, c.containerNumber) + l := c.loggerFactory.Create(name) - err = c.client.Logs(dockerclient.LogsOptions{ - Container: c.name, - Follow: true, - Stdout: true, - Stderr: true, - Tail: "0", - OutputStream: &logger.Wrapper{Logger: l}, - ErrorStream: &logger.Wrapper{Logger: l, Err: true}, - RawTerminal: info.Config.Tty, - }) + options := types.ContainerLogsOptions{ + ContainerID: c.name, + ShowStdout: true, + ShowStderr: true, + Follow: follow, + Tail: "all", + } + responseBody, err := c.client.ContainerLogs(ctx, options) + if err != nil { + return err + } + defer responseBody.Close() + + if info.Config.Tty { + _, err = io.Copy(&logger.Wrapper{Logger: l}, responseBody) + } else { + _, err = stdcopy.StdCopy(&logger.Wrapper{Logger: l}, &logger.Wrapper{Logger: l, Err: true}, responseBody) + } logrus.WithFields(logrus.Fields{"Logger": l, "err": err}).Debug("c.client.Logs() returned error") return err } -func (c *Container) pull(image string) error { - return pullImage(c.client, c.service, image) -} - -func pullImage(client *dockerclient.Client, service *Service, image string) error { - taglessRemote, tag := parsers.ParseRepositoryTag(image) - if tag == "" { - image = utils.ImageReference(taglessRemote, DefaultTag) - } - - repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) - if err != nil { - return err - } - - authConfig := cliconfig.AuthConfig{} - if service.context.ConfigFile != nil && repoInfo != nil && repoInfo.Index != nil { - authConfig = registry.ResolveAuthConfig(service.context.ConfigFile, repoInfo.Index) - } - - err = client.PullImage( - dockerclient.PullImageOptions{ - Repository: image, - OutputStream: os.Stderr, // TODO maybe get the stream from some configured place - }, - dockerclient.AuthConfiguration{ - Username: authConfig.Username, - Password: authConfig.Password, - Email: authConfig.Email, - }, - ) - - if err != nil { - logrus.Errorf("Failed to pull image %s: %v", image, err) - } - - return err -} - -func (c *Container) withContainer(action func(*dockerclient.APIContainers) error) error { - container, err := c.findExisting() +func (c *Container) withContainer(ctx context.Context, action func(*types.ContainerJSON) error) error { + container, err := c.findExisting(ctx) if err != nil { return err } @@ -523,13 +676,13 @@ func (c *Container) withContainer(action func(*dockerclient.APIContainers) error } // Port returns the host port the specified port is mapped on. 
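The new Log fetches one raw stream from ContainerLogs and only demultiplexes it with stdcopy when the container has no TTY; with a TTY the stream is a single channel and a plain io.Copy suffices. A sketch of that branch, assuming the options shown in the hunk; streamLogs is a hypothetical helper:

```go
package example

import (
	"io"
	"os"

	"golang.org/x/net/context"

	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

// streamLogs shows the TTY/non-TTY split used by Container.Log above.
func streamLogs(ctx context.Context, cli client.APIClient, id string, tty bool) error {
	body, err := cli.ContainerLogs(ctx, types.ContainerLogsOptions{
		ContainerID: id,
		ShowStdout:  true,
		ShowStderr:  true,
		Tail:        "all",
	})
	if err != nil {
		return err
	}
	defer body.Close()

	if tty {
		// A TTY merges everything into one raw stream.
		_, err = io.Copy(os.Stdout, body)
		return err
	}
	// Without a TTY the stream is multiplexed; split it back out.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, body)
	return err
}
```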
-func (c *Container) Port(port string) (string, error) { - info, err := c.findInfo() +func (c *Container) Port(ctx context.Context, port string) (string, error) { + container, err := c.findExisting(ctx) if err != nil { return "", err } - if bindings, ok := info.NetworkSettings.Ports[dockerclient.Port(port)]; ok { + if bindings, ok := container.NetworkSettings.Ports[nat.Port(port)]; ok { result := []string{} for _, binding := range bindings { result = append(result, binding.HostIP+":"+binding.HostPort) diff --git a/vendor/github.com/docker/libcompose/docker/context.go b/vendor/github.com/docker/libcompose/docker/context.go index 703e1f3c..5a35e677 100644 --- a/vendor/github.com/docker/libcompose/docker/context.go +++ b/vendor/github.com/docker/libcompose/docker/context.go @@ -9,10 +9,10 @@ import ( // client information (like configuration file, builder to use, …) type Context struct { project.Context - Builder Builder - ClientFactory ClientFactory + ClientFactory project.ClientFactory ConfigDir string ConfigFile *cliconfig.ConfigFile + AuthLookup AuthLookup } func (c *Context) open() error { diff --git a/vendor/github.com/docker/libcompose/docker/convert.go b/vendor/github.com/docker/libcompose/docker/convert.go index 60aac113..5654d56c 100644 --- a/vendor/github.com/docker/libcompose/docker/convert.go +++ b/vendor/github.com/docker/libcompose/docker/convert.go @@ -1,16 +1,27 @@ package docker import ( + "fmt" "strings" - "github.com/docker/docker/pkg/nat" - "github.com/docker/docker/runconfig" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" + "github.com/docker/libcompose/config" "github.com/docker/libcompose/project" "github.com/docker/libcompose/utils" - - dockerclient "github.com/fsouza/go-dockerclient" ) +// ConfigWrapper wraps Config, HostConfig and NetworkingConfig for a container. +type ConfigWrapper struct { + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + // Filter filters the specified string slice with the specified function. func Filter(vs []string, f func(string) bool) []string { r := make([]string, 0, len(vs)) @@ -31,39 +42,47 @@ func isVolume(s string) bool { } // ConvertToAPI converts a service configuration to a docker API container configuration. 
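Filter (used with predicates like isVolume above) is a plain filter over string slices: a spec containing ":" is a host bind, one without it is a container-only volume. A self-contained sketch of the same logic; the filter function here is a local copy, not an import:

```go
package main

import (
	"fmt"
	"strings"
)

// filter is a local copy of the exported Filter helper above.
func filter(vs []string, f func(string) bool) []string {
	r := make([]string, 0, len(vs))
	for _, v := range vs {
		if f(v) {
			r = append(r, v)
		}
	}
	return r
}

func main() {
	volumes := []string{"/data", "/home:/home", "/var/cache", "/usr/lib:/usr/lib:ro"}

	// No ":" means a container-only volume; with ":" it is a host bind.
	containerOnly := filter(volumes, func(s string) bool {
		return !strings.Contains(s, ":")
	})

	fmt.Println(containerOnly) // [/data /var/cache]
}
```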
-func ConvertToAPI(c *project.ServiceConfig, name string) (*dockerclient.CreateContainerOptions, error) { - config, hostConfig, err := Convert(c) +func ConvertToAPI(s *Service) (*ConfigWrapper, error) { + config, hostConfig, err := Convert(s.serviceConfig, s.context.Context) if err != nil { return nil, err } - result := dockerclient.CreateContainerOptions{ - Name: name, + result := ConfigWrapper{ Config: config, HostConfig: hostConfig, } return &result, nil } -func volumes(c *project.ServiceConfig) map[string]struct{} { - vs := Filter(c.Volumes, isVolume) +func isNamedVolume(volume string) bool { + return !strings.HasPrefix(volume, ".") && !strings.HasPrefix(volume, "/") && !strings.HasPrefix(volume, "~") +} - volumes := make(map[string]struct{}, len(vs)) - for _, v := range vs { - volumes[v] = struct{}{} +func volumes(c *config.ServiceConfig, ctx project.Context) map[string]struct{} { + volumes := make(map[string]struct{}, len(c.Volumes)) + for k, v := range c.Volumes { + if len(ctx.ComposeFiles) > 0 && !isNamedVolume(v) { + v = ctx.ResourceLookup.ResolvePath(v, ctx.ComposeFiles[0]) + } + + c.Volumes[k] = v + if isVolume(v) { + volumes[v] = struct{}{} + } } return volumes } -func restartPolicy(c *project.ServiceConfig) (*dockerclient.RestartPolicy, error) { - restart, err := runconfig.ParseRestartPolicy(c.Restart) +func restartPolicy(c *config.ServiceConfig) (*container.RestartPolicy, error) { + restart, err := opts.ParseRestartPolicy(c.Restart) if err != nil { return nil, err } - return &dockerclient.RestartPolicy{Name: restart.Name, MaximumRetryCount: restart.MaximumRetryCount}, nil + return &container.RestartPolicy{Name: restart.Name, MaximumRetryCount: restart.MaximumRetryCount}, nil } -func ports(c *project.ServiceConfig) (map[dockerclient.Port]struct{}, map[dockerclient.Port][]dockerclient.PortBinding, error) { +func ports(c *config.ServiceConfig) (map[nat.Port]struct{}, nat.PortMap, error) { ports, binding, err := nat.ParsePortSpecs(c.Ports) if err != nil { return nil, nil, err @@ -78,24 +97,24 @@ func ports(c *project.ServiceConfig) (map[dockerclient.Port]struct{}, map[docker ports[k] = v } - exposedPorts := map[dockerclient.Port]struct{}{} + exposedPorts := map[nat.Port]struct{}{} for k, v := range ports { - exposedPorts[dockerclient.Port(k)] = v + exposedPorts[nat.Port(k)] = v } - portBindings := map[dockerclient.Port][]dockerclient.PortBinding{} + portBindings := nat.PortMap{} for k, bv := range binding { - dcbs := make([]dockerclient.PortBinding, len(bv)) + dcbs := make([]nat.PortBinding, len(bv)) for k, v := range bv { - dcbs[k] = dockerclient.PortBinding{HostIP: v.HostIP, HostPort: v.HostPort} + dcbs[k] = nat.PortBinding{HostIP: v.HostIP, HostPort: v.HostPort} } - portBindings[dockerclient.Port(k)] = dcbs + portBindings[nat.Port(k)] = dcbs } return exposedPorts, portBindings, nil } // Convert converts a service configuration to an docker API structures (Config and HostConfig) -func Convert(c *project.ServiceConfig) (*dockerclient.Config, *dockerclient.HostConfig, error) { +func Convert(c *config.ServiceConfig, ctx project.Context) (*container.Config, *container.HostConfig, error) { restartPolicy, err := restartPolicy(c) if err != nil { return nil, nil, err @@ -111,62 +130,108 @@ func Convert(c *project.ServiceConfig) (*dockerclient.Config, *dockerclient.Host return nil, nil, err } - config := &dockerclient.Config{ - Entrypoint: utils.CopySlice(c.Entrypoint.Slice()), + var volumesFrom []string + if c.VolumesFrom != nil { + volumesFrom, err = getVolumesFrom(c.VolumesFrom, 
ctx.Project.ServiceConfigs, ctx.ProjectName) + if err != nil { + return nil, nil, err + } + } + + config := &container.Config{ + Entrypoint: strslice.StrSlice(utils.CopySlice(c.Entrypoint)), Hostname: c.Hostname, Domainname: c.DomainName, User: c.User, - Env: utils.CopySlice(c.Environment.Slice()), - Cmd: utils.CopySlice(c.Command.Slice()), + Env: utils.CopySlice(c.Environment), + Cmd: strslice.StrSlice(utils.CopySlice(c.Command)), Image: c.Image, - Labels: utils.CopyMap(c.Labels.MapParts()), + Labels: utils.CopyMap(c.Labels), ExposedPorts: exposedPorts, Tty: c.Tty, OpenStdin: c.StdinOpen, WorkingDir: c.WorkingDir, - VolumeDriver: c.VolumeDriver, - Volumes: volumes(c), + Volumes: volumes(c, ctx), + MacAddress: c.MacAddress, } - hostConfig := &dockerclient.HostConfig{ - VolumesFrom: utils.CopySlice(c.VolumesFrom), - CapAdd: utils.CopySlice(c.CapAdd), - CapDrop: utils.CopySlice(c.CapDrop), - CPUShares: c.CPUShares, - CPUSetCPUs: c.CPUSet, + + ulimits := []*units.Ulimit{} + if c.Ulimits.Elements != nil { + for _, ulimit := range c.Ulimits.Elements { + ulimits = append(ulimits, &units.Ulimit{ + Name: ulimit.Name, + Soft: ulimit.Soft, + Hard: ulimit.Hard, + }) + } + } + + resources := container.Resources{ + CgroupParent: c.CgroupParent, + Memory: c.MemLimit, + MemorySwap: c.MemSwapLimit, + CPUShares: c.CPUShares, + CPUQuota: c.CPUQuota, + CpusetCpus: c.CPUSet, + Ulimits: ulimits, + Devices: deviceMappings, + } + + hostConfig := &container.HostConfig{ + VolumesFrom: volumesFrom, + CapAdd: strslice.StrSlice(utils.CopySlice(c.CapAdd)), + CapDrop: strslice.StrSlice(utils.CopySlice(c.CapDrop)), ExtraHosts: utils.CopySlice(c.ExtraHosts), Privileged: c.Privileged, Binds: Filter(c.Volumes, isBind), - Devices: deviceMappings, - DNS: utils.CopySlice(c.DNS.Slice()), - DNSSearch: utils.CopySlice(c.DNSSearch.Slice()), - LogConfig: dockerclient.LogConfig{ - Type: c.LogDriver, - Config: utils.CopyMap(c.LogOpt), + DNS: utils.CopySlice(c.DNS), + DNSSearch: utils.CopySlice(c.DNSSearch), + LogConfig: container.LogConfig{ + Type: c.Logging.Driver, + Config: utils.CopyMap(c.Logging.Options), }, - Memory: c.MemLimit, - MemorySwap: c.MemSwapLimit, - NetworkMode: c.Net, + NetworkMode: container.NetworkMode(c.NetworkMode), ReadonlyRootfs: c.ReadOnly, - PidMode: c.Pid, - UTSMode: c.Uts, - IpcMode: c.Ipc, + PidMode: container.PidMode(c.Pid), + UTSMode: container.UTSMode(c.Uts), + IpcMode: container.IpcMode(c.Ipc), PortBindings: portBindings, RestartPolicy: *restartPolicy, SecurityOpt: utils.CopySlice(c.SecurityOpt), + VolumeDriver: c.VolumeDriver, + Resources: resources, } return config, hostConfig, nil } -func parseDevices(devices []string) ([]dockerclient.Device, error) { +func getVolumesFrom(volumesFrom []string, serviceConfigs *config.ServiceConfigs, projectName string) ([]string, error) { + volumes := []string{} + for _, volumeFrom := range volumesFrom { + if serviceConfig, ok := serviceConfigs.Get(volumeFrom); ok { + // It's a service - Use the first one + name := fmt.Sprintf("%s_%s_1", projectName, volumeFrom) + // If a container name is specified, use that instead + if serviceConfig.ContainerName != "" { + name = serviceConfig.ContainerName + } + volumes = append(volumes, name) + } else { + volumes = append(volumes, volumeFrom) + } + } + return volumes, nil +} + +func parseDevices(devices []string) ([]container.DeviceMapping, error) { // parse device mappings - deviceMappings := []dockerclient.Device{} + deviceMappings := []container.DeviceMapping{} for _, device := range devices { - v, err := 
runconfig.ParseDevice(device) + v, err := opts.ParseDevice(device) if err != nil { return nil, err } - deviceMappings = append(deviceMappings, dockerclient.Device{ + deviceMappings = append(deviceMappings, container.DeviceMapping{ PathOnHost: v.PathOnHost, PathInContainer: v.PathInContainer, CgroupPermissions: v.CgroupPermissions, diff --git a/vendor/github.com/docker/libcompose/docker/convert_test.go b/vendor/github.com/docker/libcompose/docker/convert_test.go deleted file mode 100644 index a3e82c1b..00000000 --- a/vendor/github.com/docker/libcompose/docker/convert_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package docker - -import ( - "github.com/docker/libcompose/project" - shlex "github.com/flynn/go-shlex" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestParseCommand(t *testing.T) { - exp := []string{"sh", "-c", "exec /opt/bin/flanneld -logtostderr=true -iface=${NODE_IP}"} - cmd, err := shlex.Split("sh -c 'exec /opt/bin/flanneld -logtostderr=true -iface=${NODE_IP}'") - assert.Nil(t, err) - assert.Equal(t, exp, cmd) -} - -func TestParseBindsAndVolumes(t *testing.T) { - bashCmd := "bash" - fooLabel := "foo.label" - fooLabelValue := "service.config.value" - sc := &project.ServiceConfig{ - Entrypoint: project.NewCommand(bashCmd), - Volumes: []string{"/foo", "/home:/home", "/bar/baz", "/usr/lib:/usr/lib:ro"}, - Labels: project.NewSliceorMap(map[string]string{fooLabel: "service.config.value"}), - } - cfg, hostCfg, err := Convert(sc) - assert.Nil(t, err) - assert.Equal(t, map[string]struct{}{"/foo": {}, "/bar/baz": {}}, cfg.Volumes) - assert.Equal(t, []string{"/home:/home", "/usr/lib:/usr/lib:ro"}, hostCfg.Binds) - - cfg.Labels[fooLabel] = "FUN" - cfg.Entrypoint[0] = "less" - - assert.Equal(t, fooLabelValue, sc.Labels.MapParts()[fooLabel]) - assert.Equal(t, "FUN", cfg.Labels[fooLabel]) - - assert.Equal(t, []string{bashCmd}, sc.Entrypoint.Slice()) - assert.Equal(t, []string{"less"}, cfg.Entrypoint) -} diff --git a/vendor/github.com/docker/libcompose/docker/functions.go b/vendor/github.com/docker/libcompose/docker/functions.go index ff82ceba..1b5b90af 100644 --- a/vendor/github.com/docker/libcompose/docker/functions.go +++ b/vendor/github.com/docker/libcompose/docker/functions.go @@ -1,52 +1,42 @@ package docker import ( - dockerclient "github.com/fsouza/go-dockerclient" + "golang.org/x/net/context" + + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" ) // GetContainersByFilter looks up the hosts containers with the specified filters and // returns a list of container matching it, or an error. 
-func GetContainersByFilter(client *dockerclient.Client, filters ...map[string][]string) ([]dockerclient.APIContainers, error) { - var filterResult map[string][]string +func GetContainersByFilter(ctx context.Context, clientInstance client.APIClient, containerFilters ...map[string][]string) ([]types.Container, error) { + filterArgs := filters.NewArgs() - for _, filter := range filters { - if filterResult == nil { - filterResult = filter - } else { - filterResult = And(filterResult, filter) + // FIXME(vdemeester) I don't like 3 for loops >_< + for _, filter := range containerFilters { + for key, filterValue := range filter { + for _, value := range filterValue { + filterArgs.Add(key, value) + } } } - return client.ListContainers(dockerclient.ListContainersOptions{All: true, Filters: filterResult}) + return clientInstance.ContainerList(ctx, types.ContainerListOptions{ + All: true, + Filter: filterArgs, + }) } -// GetContainerByName looks up the hosts containers with the specified name and -// returns it, or an error. -func GetContainerByName(client *dockerclient.Client, name string) (*dockerclient.APIContainers, error) { - containers, err := client.ListContainers(dockerclient.ListContainersOptions{All: true, Filters: NAME.Eq(name)}) +// GetContainer looks up the hosts containers with the specified ID +// or name and returns it, or an error. +func GetContainer(ctx context.Context, clientInstance client.APIClient, id string) (*types.ContainerJSON, error) { + container, err := clientInstance.ContainerInspect(ctx, id) if err != nil { + if client.IsErrContainerNotFound(err) { + return nil, nil + } return nil, err } - - if len(containers) == 0 { - return nil, nil - } - - return &containers[0], nil -} - -// GetContainerByID looks up the hosts containers with the specified Id and -// returns it, or an error. 
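GetContainer now maps engine-api's not-found error to a (nil, nil) result instead of listing and filtering, so callers must check both return values; GetContainersByFilter ANDs every filter map it is given. A usage sketch, assuming the signatures in the hunk; project and container names are illustrative:

```go
package example

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/libcompose/docker"
	"github.com/docker/libcompose/labels"
)

func find(ctx context.Context, cli client.APIClient) error {
	// Each Eq produces a label filter map; the maps are ANDed together.
	containers, err := docker.GetContainersByFilter(ctx, cli,
		labels.PROJECT.Eq("demo"), labels.SERVICE.Eq("web"))
	if err != nil {
		return err
	}
	fmt.Printf("%d matching containers\n", len(containers))

	// A missing container is (nil, nil), not an error.
	c, err := docker.GetContainer(ctx, cli, "demo_web_1")
	if err != nil {
		return err
	}
	if c == nil {
		fmt.Println("demo_web_1 not found")
	}
	return nil
}
```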
-func GetContainerByID(client *dockerclient.Client, id string) (*dockerclient.APIContainers, error) { - containers, err := client.ListContainers( - dockerclient.ListContainersOptions{All: true, Filters: map[string][]string{"id": {id}}}) - if err != nil { - return nil, err - } - - if len(containers) == 0 { - return nil, nil - } - - return &containers[0], nil + return &container, nil } diff --git a/vendor/github.com/docker/libcompose/docker/image.go b/vendor/github.com/docker/libcompose/docker/image.go new file mode 100644 index 00000000..24ba1f13 --- /dev/null +++ b/vendor/github.com/docker/libcompose/docker/image.go @@ -0,0 +1,108 @@ +package docker + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +func removeImage(ctx context.Context, client client.APIClient, image string) error { + _, err := client.ImageRemove(ctx, types.ImageRemoveOptions{ + ImageID: image, + }) + return err +} + +func pullImage(ctx context.Context, client client.APIClient, service *Service, image string) error { + fmt.Fprintf(os.Stderr, "Pulling %s (%s)...\n", service.name, image) + distributionRef, err := reference.ParseNamed(image) + if err != nil { + return err + } + + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + authConfig := service.context.AuthLookup.Lookup(repoInfo) + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImagePullOptions{ + ImageID: distributionRef.String(), + Tag: "latest", + RegistryAuth: encodedAuth, + } + if named, ok := distributionRef.(reference.Named); ok { + options.ImageID = named.FullName() + } + if tagged, ok := distributionRef.(reference.NamedTagged); ok { + options.Tag = tagged.Tag() + } + + timeoutsRemaining := 3 + for i := 0; i < 100; i++ { + responseBody, err := client.ImagePull(ctx, options, nil) + if err != nil { + logrus.Errorf("Failed to pull image %s: %v", image, err) + return err + } + + var writeBuff io.Writer = os.Stderr + + outFd, isTerminalOut := term.GetFdInfo(os.Stderr) + + err = jsonmessage.DisplayJSONMessagesStream(responseBody, writeBuff, outFd, isTerminalOut, nil) + responseBody.Close() + if err == nil { + return nil + } else if strings.Contains(err.Error(), "timed out") { + timeoutsRemaining -= 1 + if timeoutsRemaining == 0 { + return err + } + continue + } else if strings.Contains(err.Error(), "connection") || strings.Contains(err.Error(), "unreachable") { + time.Sleep(300 * time.Millisecond) + continue + } else { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + fmt.Fprintf(os.Stderr, "%s", writeBuff) + return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code) + } + } + } + + return err +} + +// encodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} diff --git a/vendor/github.com/docker/libcompose/docker/name.go b/vendor/github.com/docker/libcompose/docker/name.go index 
4329eae0..07529469 100644 --- a/vendor/github.com/docker/libcompose/docker/name.go +++ b/vendor/github.com/docker/libcompose/docker/name.go @@ -2,23 +2,28 @@ package docker import ( "fmt" - "io" - "time" + "strconv" - dockerclient "github.com/fsouza/go-dockerclient" + "golang.org/x/net/context" + + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/libcompose/labels" ) const format = "%s_%s_%d" // Namer defines method to provide container name. type Namer interface { - io.Closer - Next() string + Next() (string, int) } -type inOrderNamer struct { - names chan string - done chan bool +type defaultNamer struct { + project string + service string + oneOff bool + currentNumber int } type singleNamer struct { @@ -32,50 +37,61 @@ func NewSingleNamer(name string) Namer { // NewNamer returns a namer that returns names based on the specified project and // service name and an inner counter, e.g. project_service_1, project_service_2… -func NewNamer(client *dockerclient.Client, project, service string) Namer { - namer := &inOrderNamer{ - names: make(chan string), - done: make(chan bool), +func NewNamer(ctx context.Context, client client.APIClient, project, service string, oneOff bool) (Namer, error) { + namer := &defaultNamer{ + project: project, + service: service, + oneOff: oneOff, } - go func() { - for i := 1; true; i++ { - name := fmt.Sprintf(format, project, service, i) - c, err := GetContainerByName(client, name) - if err != nil { - // Sleep here to avoid crazy tight loop when things go south - time.Sleep(time.Second * 1) - continue - } - if c != nil { - continue - } + filter := filters.NewArgs() + filter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT.Str(), project)) + filter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE.Str(), service)) + if oneOff { + filter.Add("label", fmt.Sprintf("%s=%s", labels.ONEOFF.Str(), "True")) + } else { + filter.Add("label", fmt.Sprintf("%s=%s", labels.ONEOFF.Str(), "False")) + } - select { - case namer.names <- name: - case <-namer.done: - close(namer.names) - return - } + containers, err := client.ContainerList(ctx, types.ContainerListOptions{ + All: true, + Filter: filter, + }) + if err != nil { + return nil, err + } + + maxNumber := 0 + for _, container := range containers { + numberLabel := container.Labels[labels.NUMBER.Str()] + if numberLabel == "" { + namer.currentNumber = 1 + return namer, nil } - }() + number, err := strconv.Atoi(numberLabel) + if err != nil { + return nil, err + } + if number > maxNumber { + maxNumber = number + } + } + namer.currentNumber = maxNumber + 1 - return namer + return namer, nil } -func (i *inOrderNamer) Next() string { - return <-i.names +func (i *defaultNamer) Next() (string, int) { + service := i.service + if i.oneOff { + service = i.service + "_run" + } + name := fmt.Sprintf(format, i.project, service, i.currentNumber) + number := i.currentNumber + i.currentNumber = i.currentNumber + 1 + return name, number } -func (i *inOrderNamer) Close() error { - close(i.done) - return nil -} - -func (s *singleNamer) Next() string { - return s.name -} - -func (s *singleNamer) Close() error { - return nil +func (s *singleNamer) Next() (string, int) { + return s.name, 1 } diff --git a/vendor/github.com/docker/libcompose/docker/project.go b/vendor/github.com/docker/libcompose/docker/project.go index 30708088..79766999 100644 --- a/vendor/github.com/docker/libcompose/docker/project.go +++ 
b/vendor/github.com/docker/libcompose/docker/project.go @@ -1,20 +1,42 @@ package docker import ( - "github.com/Sirupsen/logrus" + "os" + "path/filepath" + "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/config" + "github.com/docker/libcompose/docker/client" "github.com/docker/libcompose/lookup" "github.com/docker/libcompose/project" ) +// ComposeVersion is the version of the docker-compose.yml file syntax that is supported +const ComposeVersion = "1.5.0" + // NewProject creates a Project with the specified context. -func NewProject(context *Context) (*project.Project, error) { - if context.ConfigLookup == nil { - context.ConfigLookup = &lookup.FileConfigLookup{} +func NewProject(context *Context, parseOptions *config.ParseOptions) (*project.Project, error) { + if context.ResourceLookup == nil { + context.ResourceLookup = &lookup.FileConfigLookup{} } if context.EnvironmentLookup == nil { - context.EnvironmentLookup = &lookup.OsEnvLookup{} + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + context.EnvironmentLookup = &lookup.ComposableEnvLookup{ + Lookups: []config.EnvironmentLookup{ + &lookup.EnvfileLookup{ + Path: filepath.Join(cwd, ".env"), + }, + &lookup.OsEnvLookup{}, + }, + } + } + + if context.AuthLookup == nil { + context.AuthLookup = NewConfigAuthLookup(context) } if context.ServiceFactory == nil { @@ -23,19 +45,16 @@ func NewProject(context *Context) (*project.Project, error) { } } - if context.Builder == nil { - context.Builder = NewDaemonBuilder(context) - } - if context.ClientFactory == nil { - factory, err := NewDefaultClientFactory(ClientOpts{}) + factory, err := project.NewDefaultClientFactory(client.Options{}) if err != nil { return nil, err } context.ClientFactory = factory } - p := project.NewProject(&context.Context) + // FIXME(vdemeester) Remove the context duplication ? + p := project.NewProject(context.ClientFactory, &context.Context, parseOptions) err := p.Parse() if err != nil { diff --git a/vendor/github.com/docker/libcompose/docker/service.go b/vendor/github.com/docker/libcompose/docker/service.go index 183f4fde..ffc2411d 100644 --- a/vendor/github.com/docker/libcompose/docker/service.go +++ b/vendor/github.com/docker/libcompose/docker/service.go @@ -2,21 +2,31 @@ package docker import ( "fmt" + "strconv" + "strings" + + "golang.org/x/net/context" + "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/nat" + "github.com/docker/engine-api/client" + "github.com/docker/go-connections/nat" + "github.com/docker/libcompose/config" + "github.com/docker/libcompose/docker/builder" + "github.com/docker/libcompose/labels" "github.com/docker/libcompose/project" + "github.com/docker/libcompose/project/options" "github.com/docker/libcompose/utils" ) // Service is a project.Service implementation. type Service struct { name string - serviceConfig *project.ServiceConfig + serviceConfig *config.ServiceConfig context *Context } // NewService creates a service -func NewService(name string, serviceConfig *project.ServiceConfig, context *Context) *Service { +func NewService(name string, serviceConfig *config.ServiceConfig, context *Context) *Service { return &Service{ name: name, serviceConfig: serviceConfig, @@ -29,8 +39,8 @@ func (s *Service) Name() string { return s.name } -// Config returns the configuration of the service (project.ServiceConfig). -func (s *Service) Config() *project.ServiceConfig { +// Config returns the configuration of the service (config.ServiceConfig).
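NewProject now resolves environment variables through a chain rather than the OS environment alone: an .env file in the working directory is consulted first and real environment variables second, and since ComposableEnvLookup keeps the last non-empty answer, an exported variable overrides the .env entry. A minimal sketch of that resolution order, reusing the vendored types (the .env path and the API_KEY key are illustrative):

package main

import (
    "fmt"

    "github.com/docker/libcompose/config"
    "github.com/docker/libcompose/lookup"
)

func main() {
    // The same chain NewProject wires up: .env file first, OS environment second.
    env := &lookup.ComposableEnvLookup{
        Lookups: []config.EnvironmentLookup{
            &lookup.EnvfileLookup{Path: ".env"},
            &lookup.OsEnvLookup{},
        },
    }
    // Later lookups win, so an exported API_KEY shadows the .env entry.
    fmt.Println(env.Lookup("API_KEY", "web", &config.ServiceConfig{}))
}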
+func (s *Service) Config() *config.ServiceConfig { return s.serviceConfig } @@ -39,36 +49,67 @@ func (s *Service) DependentServices() []project.ServiceRelationship { return project.DefaultDependentServices(s.context.Project, s) } -// Create implements Service.Create. -func (s *Service) Create() error { - imageName, err := s.build() +// Create implements Service.Create. It ensures the image exists or builds it +// if it can, and then creates a container. +func (s *Service) Create(ctx context.Context, options options.Create) error { + containers, err := s.collectContainers(ctx) if err != nil { return err } - _, err = s.createOne(imageName) + imageName, err := s.ensureImageExists(ctx, options.NoBuild) + if err != nil { + return err + } + + if len(containers) != 0 { + return s.eachContainer(ctx, func(c *Container) error { + return s.recreateIfNeeded(ctx, imageName, c, options.NoRecreate, options.ForceRecreate) + }) + } + + _, err = s.createOne(ctx, imageName) return err } -func (s *Service) collectContainers() ([]*Container, error) { +func (s *Service) collectContainers(ctx context.Context) ([]*Container, error) { client := s.context.ClientFactory.Create(s) - containers, err := GetContainersByFilter(client, SERVICE.Eq(s.name), PROJECT.Eq(s.context.Project.Name)) + containers, err := GetContainersByFilter(ctx, client, labels.SERVICE.Eq(s.name), labels.PROJECT.Eq(s.context.Project.Name)) if err != nil { return nil, err } + legacyContainers, err := GetContainersByFilter(ctx, client, labels.SERVICE_LEGACY.Eq(s.name), labels.PROJECT_LEGACY.Eq(s.context.Project.Name)) + if err != nil { + return nil, err + } + + if len(containers) == 0 && len(legacyContainers) > 0 { + containers = legacyContainers + } + result := []*Container{} for _, container := range containers { - name := container.Labels[NAME.Str()] - result = append(result, NewContainer(client, name, s)) + numberLabel := container.Labels[labels.NUMBER.Str()] + name := strings.SplitAfter(container.Names[0], "/") + if numberLabel == "" { + result = append(result, NewContainer(client, name[1], 1, s)) + return result, nil + } + containerNumber, err := strconv.Atoi(numberLabel) + if err != nil { + return nil, err + } + // Compose adds "/" before the name, so name[1] stores the actual name. + result = append(result, NewContainer(client, name[1], containerNumber, s)) } return result, nil } -func (s *Service) createOne(imageName string) (*Container, error) { - containers, err := s.constructContainers(imageName, 1) +func (s *Service) createOne(ctx context.Context, imageName string) (*Container, error) { + containers, err := s.constructContainers(ctx, imageName, 1) if err != nil { return nil, err } @@ -76,24 +117,69 @@ func (s *Service) createOne(imageName string) (*Container, error) { return containers[0], err } -// Build implements Service.Build. If an imageName is specified or if the context has -// no build to work with it will do nothing. Otherwise it will try to build -// the image and returns an error if any. 
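collectContainers derives each container's display name and sequence number from the Docker API response: the engine prefixes every name with a slash, and the com.docker.compose.container-number label carries the index. A standalone sketch of just that parsing, with made-up values:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // Docker returns names with a leading "/"; compose stores the index in a label.
    apiName := "/myproject_web_2"
    numberLabel := "2" // value of com.docker.compose.container-number

    // SplitAfter keeps the separator, so parts[1] holds the bare name.
    parts := strings.SplitAfter(apiName, "/")
    number, err := strconv.Atoi(numberLabel)
    if err != nil {
        panic(err)
    }
    fmt.Printf("name=%s number=%d\n", parts[1], number) // name=myproject_web_2 number=2
}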
-func (s *Service) Build() error { - _, err := s.build() +func (s *Service) ensureImageExists(ctx context.Context, noBuild bool) (string, error) { + err := s.imageExists() + + if err == nil { + return s.imageName(), nil + } + + if err != nil && !client.IsErrImageNotFound(err) { + return "", err + } + + if s.Config().Build.Context != "" { + if noBuild { + return "", fmt.Errorf("Service %q needs to be built, but no-build was specified", s.name) + } + return s.imageName(), s.build(ctx, options.Build{}) + } + + return s.imageName(), s.Pull(ctx) +} + +func (s *Service) imageExists() error { + client := s.context.ClientFactory.Create(s) + + _, _, err := client.ImageInspectWithRaw(context.Background(), s.imageName(), false) return err } -func (s *Service) build() (string, error) { - if s.context.Builder == nil { - return s.Config().Image, nil +func (s *Service) imageName() string { + if s.Config().Image != "" { + return s.Config().Image } - - return s.context.Builder.Build(s.context.Project, s) + return fmt.Sprintf("%s_%s", s.context.ProjectName, s.Name()) } -func (s *Service) constructContainers(imageName string, count int) ([]*Container, error) { - result, err := s.collectContainers() +// Build implements Service.Build. If an imageName is specified or if the context has +// no build to work with it will do nothing. Otherwise it will try to build +// the image and returns an error if any. +func (s *Service) Build(ctx context.Context, buildOptions options.Build) error { + if s.Config().Image != "" { + return nil + } + return s.build(ctx, buildOptions) +} + +func (s *Service) build(ctx context.Context, buildOptions options.Build) error { + if s.Config().Build.Context == "" { + return fmt.Errorf("Specified service does not have a build section") + } + builder := &builder.DaemonBuilder{ + Client: s.context.ClientFactory.Create(s), + ContextDirectory: s.Config().Build.Context, + Dockerfile: s.Config().Build.Dockerfile, + AuthConfigs: s.context.AuthLookup.All(), + NoCache: buildOptions.NoCache, + ForceRemove: buildOptions.ForceRemove, + Pull: buildOptions.Pull, + } + return builder.Build(ctx, s.imageName()) +} + +func (s *Service) constructContainers(ctx context.Context, imageName string, count int) ([]*Container, error) { + result, err := s.collectContainers(ctx) if err != nil { return nil, err } @@ -108,24 +194,25 @@ func (s *Service) constructContainers(imageName string, count int) ([]*Container } namer = NewSingleNamer(s.serviceConfig.ContainerName) } else { - namer = NewNamer(client, s.context.Project.Name, s.name) + namer, err = NewNamer(ctx, client, s.context.Project.Name, s.name, false) + if err != nil { + return nil, err + } } - defer namer.Close() - for i := len(result); i < count; i++ { - containerName := namer.Next() + containerName, containerNumber := namer.Next() - c := NewContainer(client, containerName, s) + c := NewContainer(client, containerName, containerNumber, s) - dockerContainer, err := c.Create(imageName) + dockerContainer, err := c.Create(ctx, imageName) if err != nil { return nil, err } - logrus.Debugf("Created container %s: %v", dockerContainer.ID, dockerContainer.Names) + logrus.Debugf("Created container %s: %v", dockerContainer.ID, dockerContainer.Name) - result = append(result, NewContainer(client, containerName, s)) + result = append(result, NewContainer(client, containerName, containerNumber, s)) } return result, nil @@ -133,26 +220,55 @@ func (s *Service) constructContainers(imageName string, count int) ([]*Container // Up implements Service.Up. 
It builds the image if needed, creates a container // and starts it. -func (s *Service) Up() error { - imageName, err := s.build() +func (s *Service) Up(ctx context.Context, options options.Up) error { + containers, err := s.collectContainers(ctx) if err != nil { return err } - return s.up(imageName, true) + var imageName = s.imageName() + if len(containers) == 0 || !options.NoRecreate { + imageName, err = s.ensureImageExists(ctx, options.NoBuild) + if err != nil { + return err + } + } + + return s.up(ctx, imageName, true, options) +} + +// Run implements Service.Run. It runs a one-off command within the service container. +func (s *Service) Run(ctx context.Context, commandParts []string) (int, error) { + imageName, err := s.ensureImageExists(ctx, false) + if err != nil { + return -1, err + } + + client := s.context.ClientFactory.Create(s) + + namer, err := NewNamer(ctx, client, s.context.Project.Name, s.name, true) + if err != nil { + return -1, err + } + + containerName, containerNumber := namer.Next() + + c := NewOneOffContainer(client, containerName, containerNumber, s) + + return c.Run(ctx, imageName, &config.ServiceConfig{Command: commandParts, Tty: true, StdinOpen: true}) } // Info implements Service.Info. It returns a project.InfoSet with the containers // related to this service (can be multiple if using the scale command). -func (s *Service) Info(qFlag bool) (project.InfoSet, error) { +func (s *Service) Info(ctx context.Context, qFlag bool) (project.InfoSet, error) { result := project.InfoSet{} - containers, err := s.collectContainers() + containers, err := s.collectContainers(ctx) if err != nil { return nil, err } for _, c := range containers { - info, err := c.Info(qFlag) + info, err := c.Info(ctx, qFlag) if err != nil { return nil, err } @@ -163,12 +279,12 @@ func (s *Service) Info(qFlag bool) (project.InfoSet, error) { } // Start implements Service.Start. It tries to start a container without creating it. 
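Run builds its container name through the same Namer as Create, but with oneOff set, which appends "_run" to the service part before the shared format constant is applied. A quick illustration of the resulting names (project and service names are examples):

package main

import "fmt"

const format = "%s_%s_%d" // the same constant name.go uses

// next mirrors defaultNamer.Next: one-off containers get a "_run" suffix
// on the service part before the counter is applied.
func next(project, service string, oneOff bool, current int) string {
    if oneOff {
        service = service + "_run"
    }
    return fmt.Sprintf(format, project, service, current)
}

func main() {
    fmt.Println(next("myproject", "web", false, 1)) // myproject_web_1
    fmt.Println(next("myproject", "web", true, 1))  // myproject_web_run_1
}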
-func (s *Service) Start() error { - return s.up("", false) +func (s *Service) Start(ctx context.Context) error { + return s.up(ctx, "", false, options.Up{}) } -func (s *Service) up(imageName string, create bool) error { - containers, err := s.collectContainers() +func (s *Service) up(ctx context.Context, imageName string, create bool, options options.Up) error { + containers, err := s.collectContainers(ctx) if err != nil { return err } @@ -176,41 +292,45 @@ func (s *Service) up(imageName string, create bool) error { logrus.Debugf("Found %d existing containers for service %s", len(containers), s.name) if len(containers) == 0 && create { - c, err := s.createOne(imageName) + c, err := s.createOne(ctx, imageName) if err != nil { return err } containers = []*Container{c} } - return s.eachContainer(func(c *Container) error { + return s.eachContainer(ctx, func(c *Container) error { if create { - if err := s.recreateIfNeeded(imageName, c); err != nil { + if err := s.recreateIfNeeded(ctx, imageName, c, options.NoRecreate, options.ForceRecreate); err != nil { return err } } - return c.Up(imageName) + if options.Log { + go c.Log(ctx, true) + } + + return c.Up(ctx, imageName) }) } -func (s *Service) recreateIfNeeded(imageName string, c *Container) error { - if s.context.NoRecreate { +func (s *Service) recreateIfNeeded(ctx context.Context, imageName string, c *Container, noRecreate, forceRecreate bool) error { + if noRecreate { return nil } - outOfSync, err := c.OutOfSync(imageName) + outOfSync, err := c.OutOfSync(ctx, imageName) if err != nil { return err } logrus.WithFields(logrus.Fields{ "outOfSync": outOfSync, - "ForceRecreate": s.context.ForceRecreate, - "NoRecreate": s.context.NoRecreate}).Debug("Going to decide if recreate is needed") + "ForceRecreate": forceRecreate, + "NoRecreate": noRecreate}).Debug("Going to decide if recreate is needed") - if s.context.ForceRecreate || outOfSync { + if forceRecreate || outOfSync { logrus.Infof("Recreating %s", s.name) - if _, err := c.Recreate(imageName); err != nil { + if _, err := c.Recreate(ctx, imageName); err != nil { return err } } @@ -218,8 +338,8 @@ func (s *Service) recreateIfNeeded(imageName string, c *Container) error { return nil } -func (s *Service) eachContainer(action func(*Container) error) error { - containers, err := s.collectContainers() +func (s *Service) eachContainer(ctx context.Context, action func(*Container) error) error { + containers, err := s.collectContainers(ctx) if err != nil { return err } @@ -238,58 +358,58 @@ func (s *Service) eachContainer(action func(*Container) error) error { return tasks.Wait() } -// Down implements Service.Down. It stops any containers related to the service. -func (s *Service) Down() error { - return s.eachContainer(func(c *Container) error { - return c.Down() +// Stop implements Service.Stop. It stops any containers related to the service. +func (s *Service) Stop(ctx context.Context, timeout int) error { + return s.eachContainer(ctx, func(c *Container) error { + return c.Stop(ctx, timeout) }) } // Restart implements Service.Restart. It restarts any containers related to the service. -func (s *Service) Restart() error { - return s.eachContainer(func(c *Container) error { - return c.Restart() +func (s *Service) Restart(ctx context.Context, timeout int) error { + return s.eachContainer(ctx, func(c *Container) error { + return c.Restart(ctx, timeout) }) } // Kill implements Service.Kill. It kills any containers related to the service. 
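eachContainer fans the given action out over every container of the service and waits for the whole batch; the vendored code delegates to a tasks helper from the utils package. The same fan-out shape with only the standard library (Container and the action are stand-ins):

package main

import (
    "fmt"
    "sync"
)

type container struct{ name string }

func main() {
    containers := []container{{"web_1"}, {"web_2"}, {"web_3"}}
    action := func(c container) error {
        fmt.Println("stopping", c.name)
        return nil
    }

    var wg sync.WaitGroup
    errs := make(chan error, len(containers)) // buffered so workers never block
    for _, c := range containers {
        wg.Add(1)
        go func(c container) {
            defer wg.Done()
            if err := action(c); err != nil {
                errs <- err
            }
        }(c)
    }
    wg.Wait()
    close(errs)
    if err := <-errs; err != nil { // nil when no worker failed
        fmt.Println("error:", err)
    }
}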
-func (s *Service) Kill() error { - return s.eachContainer(func(c *Container) error { - return c.Kill() +func (s *Service) Kill(ctx context.Context, signal string) error { + return s.eachContainer(ctx, func(c *Container) error { + return c.Kill(ctx, signal) }) } // Delete implements Service.Delete. It removes any containers related to the service. -func (s *Service) Delete() error { - return s.eachContainer(func(c *Container) error { - return c.Delete() +func (s *Service) Delete(ctx context.Context, options options.Delete) error { + return s.eachContainer(ctx, func(c *Container) error { + return c.Delete(ctx, options.RemoveVolume) }) } // Log implements Service.Log. It returns the docker logs for each container related to the service. -func (s *Service) Log() error { - return s.eachContainer(func(c *Container) error { - return c.Log() +func (s *Service) Log(ctx context.Context, follow bool) error { + return s.eachContainer(ctx, func(c *Container) error { + return c.Log(ctx, follow) }) } // Scale implements Service.Scale. It creates or removes containers to have the specified number // of containers related to the service running. -func (s *Service) Scale(scale int) error { +func (s *Service) Scale(ctx context.Context, scale int, timeout int) error { if s.specificiesHostPort() { logrus.Warnf("The \"%s\" service specifies a port on the host. If multiple containers for this service are created on a single host, the port will clash.", s.Name()) } foundCount := 0 - err := s.eachContainer(func(c *Container) error { + err := s.eachContainer(ctx, func(c *Container) error { foundCount++ if foundCount > scale { - err := c.Down() + err := c.Stop(ctx, timeout) if err != nil { return err } - - return c.Delete() + // FIXME(vdemeester) remove volume in scale by default ? + return c.Delete(ctx, false) } return nil }) @@ -299,33 +419,67 @@ func (s *Service) Scale(scale int) error { } if foundCount != scale { - imageName, err := s.build() + imageName, err := s.ensureImageExists(ctx, false) if err != nil { return err } - if _, err = s.constructContainers(imageName, scale); err != nil { + if _, err = s.constructContainers(ctx, imageName, scale); err != nil { return err } } - return s.up("", false) + return s.up(ctx, "", false, options.Up{}) } -// Pull implements Service.Pull. It pulls or build the image of the service. -func (s *Service) Pull() error { +// Pull implements Service.Pull. It pulls the image of the service and skips services that +// would need to be built. +func (s *Service) Pull(ctx context.Context) error { if s.Config().Image == "" { return nil } - return pullImage(s.context.ClientFactory.Create(s), s, s.Config().Image) + return pullImage(ctx, s.context.ClientFactory.Create(s), s, s.Config().Image) +} + +// Pause implements Service.Pause. It pauses the container(s) related +// to the service. +func (s *Service) Pause(ctx context.Context) error { + return s.eachContainer(ctx, func(c *Container) error { + return c.Pause(ctx) + }) +} + +// Unpause implements Service.Unpause. It unpauses the container(s) +// related to the service. +func (s *Service) Unpause(ctx context.Context) error { + return s.eachContainer(ctx, func(c *Container) error { + return c.Unpause(ctx) + }) +} + +// RemoveImage implements Service.RemoveImage. It removes images used for the service +// depending on the specified type. 
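The Scale logic above makes a single pass over existing containers: anything beyond the requested count is stopped and deleted, and if fewer exist than requested, constructContainers tops the service up before everything is brought up. The bookkeeping reduces to plain arithmetic (numbers are illustrative):

package main

import "fmt"

// plan restates Scale's bookkeeping: surplus containers are stopped and
// deleted, missing ones are constructed before the service is brought up.
func plan(found, scale int) (remove, create int) {
    if found > scale {
        remove = found - scale
    }
    if found < scale {
        create = scale - found
    }
    return remove, create
}

func main() {
    fmt.Println(plan(5, 2)) // 3 0 -> stop and delete three extras
    fmt.Println(plan(1, 4)) // 0 3 -> construct three more containers
}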
+func (s *Service) RemoveImage(ctx context.Context, imageType options.ImageType) error { + switch imageType { + case "local": + if s.Config().Image != "" { + return nil + } + return removeImage(ctx, s.context.ClientFactory.Create(s), s.imageName()) + case "all": + return removeImage(ctx, s.context.ClientFactory.Create(s), s.imageName()) + default: + // Don't do a thing, should be validated up-front + return nil + } } // Containers implements Service.Containers. It returns the list of containers // that are related to the service. -func (s *Service) Containers() ([]project.Container, error) { +func (s *Service) Containers(ctx context.Context) ([]project.Container, error) { result := []project.Container{} - containers, err := s.collectContainers() + containers, err := s.collectContainers(ctx) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/libcompose/docker/service_factory.go b/vendor/github.com/docker/libcompose/docker/service_factory.go index d5b3b727..3b455ee4 100644 --- a/vendor/github.com/docker/libcompose/docker/service_factory.go +++ b/vendor/github.com/docker/libcompose/docker/service_factory.go @@ -1,6 +1,9 @@ package docker -import "github.com/docker/libcompose/project" +import ( + "github.com/docker/libcompose/config" + "github.com/docker/libcompose/project" +) // ServiceFactory is an implementation of project.ServiceFactory. type ServiceFactory struct { @@ -8,6 +11,6 @@ type ServiceFactory struct { } // Create creates a Service based on the specified project, name and service configuration. -func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) { +func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *config.ServiceConfig) (project.Service, error) { return NewService(name, serviceConfig, s.context), nil } diff --git a/vendor/github.com/docker/libcompose/docker/service_test.go b/vendor/github.com/docker/libcompose/docker/service_test.go deleted file mode 100644 index 69f6251b..00000000 --- a/vendor/github.com/docker/libcompose/docker/service_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package docker - -import ( - "github.com/docker/libcompose/project" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSpecifiesHostPort(t *testing.T) { - servicesWithHostPort := []Service{ - {serviceConfig: &project.ServiceConfig{Ports: []string{"8000:8000"}}}, - {serviceConfig: &project.ServiceConfig{Ports: []string{"127.0.0.1:8000:8000"}}}, - } - - for _, service := range servicesWithHostPort { - assert.True(t, service.specificiesHostPort()) - } - - servicesWithoutHostPort := []Service{ - {serviceConfig: &project.ServiceConfig{Ports: []string{"8000"}}}, - {serviceConfig: &project.ServiceConfig{Ports: []string{"127.0.0.1::8000"}}}, - } - - for _, service := range servicesWithoutHostPort { - assert.False(t, service.specificiesHostPort()) - } -} diff --git a/vendor/github.com/docker/libcompose/docker/labels.go b/vendor/github.com/docker/libcompose/labels/labels.go similarity index 56% rename from vendor/github.com/docker/libcompose/docker/labels.go rename to vendor/github.com/docker/libcompose/labels/labels.go index 213c1e98..859350d2 100644 --- a/vendor/github.com/docker/libcompose/docker/labels.go +++ b/vendor/github.com/docker/libcompose/labels/labels.go @@ -1,7 +1,8 @@ -package docker +package labels import ( "encoding/json" + "fmt" "github.com/docker/libcompose/utils" ) @@ -11,20 +12,26 @@ type Label string // Libcompose default labels. 
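The label constants defined just below are exactly what NewNamer and GetContainersByFilter match on when listing containers through engine-api. Building such a filter by hand looks like this (project and service values are examples):

package main

import (
    "fmt"

    "github.com/docker/engine-api/types/filters"
    "github.com/docker/libcompose/labels"
)

func main() {
    // The same filter NewNamer builds before calling ContainerList; it is
    // passed along as types.ContainerListOptions{All: true, Filter: filter}.
    filter := filters.NewArgs()
    filter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT.Str(), "myproject"))
    filter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE.Str(), "web"))
    filter.Add("label", fmt.Sprintf("%s=%s", labels.ONEOFF.Str(), "False"))
    fmt.Println(filter)
}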
const ( - NAME = Label("io.docker.compose.name") - PROJECT = Label("io.docker.compose.project") - SERVICE = Label("io.docker.compose.service") - HASH = Label("io.docker.compose.config-hash") + NUMBER = Label("com.docker.compose.container-number") + ONEOFF = Label("com.docker.compose.oneoff") + PROJECT = Label("com.docker.compose.project") + SERVICE = Label("com.docker.compose.service") + HASH = Label("com.docker.compose.config-hash") + VERSION = Label("com.docker.compose.version") + + PROJECT_LEGACY = Label("io.docker.compose.project") + SERVICE_LEGACY = Label("io.docker.compose.service") + HASH_LEGACY = Label("io.docker.compose.config-hash") ) // EqString returns a label json string representation with the specified value. func (f Label) EqString(value string) string { - return utils.LabelFilterString(string(f), value) + return LabelFilterString(string(f), value) } // Eq returns a label map representation with the specified value. func (f Label) Eq(value string) map[string][]string { - return utils.LabelFilter(string(f), value) + return LabelFilter(string(f), value) } // AndString returns a json list of labels by merging the two specified values (left and right) serialized as string. @@ -73,3 +80,19 @@ func And(left, right map[string][]string) map[string][]string { func (f Label) Str() string { return string(f) } + +// LabelFilterString returns a label json string representation of the specified pair (key, value) +// that is used as filter for docker. +func LabelFilterString(key, value string) string { + return utils.FilterString(map[string][]string{ + "label": {fmt.Sprintf("%s=%s", key, value)}, + }) +} + +// LabelFilter returns a label map representation of the specified pair (key, value) +// that is used as filter for docker. +func LabelFilter(key, value string) map[string][]string { + return map[string][]string{ + "label": {fmt.Sprintf("%s=%s", key, value)}, + } +} diff --git a/vendor/github.com/docker/libcompose/lookup/composable.go b/vendor/github.com/docker/libcompose/lookup/composable.go new file mode 100644 index 00000000..c48987fe --- /dev/null +++ b/vendor/github.com/docker/libcompose/lookup/composable.go @@ -0,0 +1,25 @@ +package lookup + +import ( + "github.com/docker/libcompose/config" +) + +// ComposableEnvLookup is a structure that implements the project.EnvironmentLookup interface. +// It holds an ordered list of EnvironmentLookup to call to look for the environment value. +type ComposableEnvLookup struct { + Lookups []config.EnvironmentLookup +} + +// Lookup creates a string slice of string containing a "docker-friendly" environment string +// in the form of 'key=value'. It loops through the lookups and returns the latest value if +// more than one lookup returns a result. +func (l *ComposableEnvLookup) Lookup(key, serviceName string, config *config.ServiceConfig) []string { + result := []string{} + for _, lookup := range l.Lookups { + env := lookup.Lookup(key, serviceName, config) + if len(env) == 1 { + result = env + } + } + return result +} diff --git a/vendor/github.com/docker/libcompose/lookup/envfile.go b/vendor/github.com/docker/libcompose/lookup/envfile.go new file mode 100644 index 00000000..65bd9867 --- /dev/null +++ b/vendor/github.com/docker/libcompose/lookup/envfile.go @@ -0,0 +1,31 @@ +package lookup + +import ( + "strings" + + "github.com/docker/docker/runconfig/opts" + "github.com/docker/libcompose/config" +) + +// EnvfileLookup is a structure that implements the project.EnvironmentLookup interface. 
+// It holds the path of the file where to lookup environment values. +type EnvfileLookup struct { + Path string +} + +// Lookup creates a string slice of string containing a "docker-friendly" environment string +// in the form of 'key=value'. It gets environment values using a '.env' file in the specified +// path. +func (l *EnvfileLookup) Lookup(key, serviceName string, config *config.ServiceConfig) []string { + envs, err := opts.ParseEnvFile(l.Path) + if err != nil { + return []string{} + } + for _, env := range envs { + e := strings.Split(env, "=") + if e[0] == key { + return []string{env} + } + } + return []string{} +} diff --git a/vendor/github.com/docker/libcompose/lookup/file.go b/vendor/github.com/docker/libcompose/lookup/file.go index 88a585ab..70b3d8d6 100644 --- a/vendor/github.com/docker/libcompose/lookup/file.go +++ b/vendor/github.com/docker/libcompose/lookup/file.go @@ -2,13 +2,44 @@ package lookup import ( "io/ioutil" + "os" "path" + "path/filepath" "strings" "github.com/Sirupsen/logrus" ) -// FileConfigLookup is a "bare" structure that implements the project.ConfigLookup interface +// relativePath returns the proper relative path for the given file path. If +// the relativeTo string equals "-", then it means that it's from the stdin, +// and the returned path will be the current working directory. Otherwise, if +// file is really an absolute path, then it will be returned without any +// changes. Otherwise, the returned path will be a combination of relativeTo +// and file. +func relativePath(file, relativeTo string) string { + // stdin: return the current working directory if possible. + if relativeTo == "-" { + if cwd, err := os.Getwd(); err == nil { + return cwd + } + } + + // If the given file is already an absolute path, just return it. + // Otherwise, the returned path will be relative to the given relativeTo + // path. + if filepath.IsAbs(file) { + return file + } + + abs, err := filepath.Abs(filepath.Join(path.Dir(relativeTo), file)) + if err != nil { + logrus.Errorf("Failed to get absolute directory: %s", err) + return file + } + return abs +} + +// FileConfigLookup is a "bare" structure that implements the project.ResourceLookup interface type FileConfigLookup struct { } @@ -17,14 +48,19 @@ type FileConfigLookup struct { // If file starts with a slash ('/'), it tries to load it, otherwise it will build a // filename using the folder part of relativeTo joined with file. func (f *FileConfigLookup) Lookup(file, relativeTo string) ([]byte, string, error) { - if strings.HasPrefix(file, "/") { - logrus.Debugf("Reading file %s", file) - bytes, err := ioutil.ReadFile(file) - return bytes, file, err - } - - fileName := path.Join(path.Dir(relativeTo), file) - logrus.Debugf("Reading file %s relative to %s", fileName, relativeTo) - bytes, err := ioutil.ReadFile(fileName) - return bytes, fileName, err + file = relativePath(file, relativeTo) + logrus.Debugf("Reading file %s", file) + bytes, err := ioutil.ReadFile(file) + return bytes, file, err +} + +// ResolvePath returns the path to be used for the given path volume. This +// function already takes care of relative paths. 
+func (f *FileConfigLookup) ResolvePath(path, inFile string) string { + vs := strings.SplitN(path, ":", 2) + if len(vs) != 2 || filepath.IsAbs(vs[0]) { + return path + } + vs[0] = relativePath(vs[0], inFile) + return strings.Join(vs, ":") } diff --git a/vendor/github.com/docker/libcompose/lookup/file_test.go b/vendor/github.com/docker/libcompose/lookup/file_test.go deleted file mode 100644 index 6814e6b5..00000000 --- a/vendor/github.com/docker/libcompose/lookup/file_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package lookup - -import ( - "io/ioutil" - "path/filepath" - "testing" -) - -type input struct { - file string - relativeTo string -} - -func TestLookupError(t *testing.T) { - invalids := map[input]string{ - input{"", ""}: "read .: is a directory", - input{"", "/tmp/"}: "read /tmp: is a directory", - input{"file", "/does/not/exists/"}: "open /does/not/exists/file: no such file or directory", - input{"file", "/does/not/something"}: "open /does/not/file: no such file or directory", - input{"file", "/does/not/exists/another"}: "open /does/not/exists/file: no such file or directory", - input{"/does/not/exists/file", "/tmp/"}: "open /does/not/exists/file: no such file or directory", - input{"does/not/exists/file", "/tmp/"}: "open /tmp/does/not/exists/file: no such file or directory", - } - - fileConfigLookup := FileConfigLookup{} - - for invalid, expectedError := range invalids { - _, _, err := fileConfigLookup.Lookup(invalid.file, invalid.relativeTo) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected error with '%s', got '%v'", expectedError, err) - } - } -} - -func TestLookupOK(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "lookup-tests") - if err != nil { - t.Fatal(err) - } - tmpFile1 := filepath.Join(tmpFolder, "file1") - tmpFile2 := filepath.Join(tmpFolder, "file2") - if err = ioutil.WriteFile(tmpFile1, []byte("content1"), 0755); err != nil { - t.Fatal(err) - } - if err = ioutil.WriteFile(tmpFile2, []byte("content2"), 0755); err != nil { - t.Fatal(err) - } - - fileConfigLookup := FileConfigLookup{} - - valids := map[input]string{ - input{"file1", tmpFolder + "/"}: "content1", - input{"file2", tmpFolder + "/"}: "content2", - input{tmpFile1, tmpFolder}: "content1", - input{tmpFile1, "/does/not/exists"}: "content1", - input{"file2", tmpFile1}: "content2", - } - - for valid, expectedContent := range valids { - out, _, err := fileConfigLookup.Lookup(valid.file, valid.relativeTo) - if err != nil || string(out) != expectedContent { - t.Fatalf("Expected %s to contains '%s', got %s, %v.", valid.file, expectedContent, out, err) - } - } -} diff --git a/vendor/github.com/docker/libcompose/lookup/simple_env.go b/vendor/github.com/docker/libcompose/lookup/simple_env.go index 7b8aacd9..6f8c25bf 100644 --- a/vendor/github.com/docker/libcompose/lookup/simple_env.go +++ b/vendor/github.com/docker/libcompose/lookup/simple_env.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/docker/libcompose/project" + "github.com/docker/libcompose/config" ) // OsEnvLookup is a "bare" structure that implements the project.EnvironmentLookup interface @@ -15,7 +15,7 @@ type OsEnvLookup struct { // in the form of 'key=value'. It gets environment values using os.Getenv. // If the os environment variable does not exists, the slice is empty. serviceName and config // are not used at all in this implementation. 
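ResolvePath applies the relativePath rule above to the host half of a volume specification: only a relative host path left of the first colon is rewritten against the compose file's directory, while absolute host paths and single-path specs pass through untouched. Called directly on the exported type (paths are examples):

package main

import (
    "fmt"

    "github.com/docker/libcompose/lookup"
)

func main() {
    l := &lookup.FileConfigLookup{}
    // Relative host path: rewritten against the compose file's directory.
    fmt.Println(l.ResolvePath("./data:/data", "/proj/docker-compose.yml")) // /proj/data:/data
    // Absolute host path: returned unchanged.
    fmt.Println(l.ResolvePath("/var/data:/data", "/proj/docker-compose.yml"))
}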
-func (o *OsEnvLookup) Lookup(key, serviceName string, config *project.ServiceConfig) []string { +func (o *OsEnvLookup) Lookup(key, serviceName string, config *config.ServiceConfig) []string { ret := os.Getenv(key) if ret == "" { return []string{} diff --git a/vendor/github.com/docker/libcompose/lookup/simple_env_test.go b/vendor/github.com/docker/libcompose/lookup/simple_env_test.go deleted file mode 100644 index 4d3e4e1f..00000000 --- a/vendor/github.com/docker/libcompose/lookup/simple_env_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package lookup - -import ( - "testing" - - "github.com/docker/libcompose/project" -) - -func TestOsEnvLookup(t *testing.T) { - // Putting bare minimun value for serviceName and config as there are - // not important on this test. - serviceName := "anything" - config := &project.ServiceConfig{} - - osEnvLookup := &OsEnvLookup{} - - envs := osEnvLookup.Lookup("PATH", serviceName, config) - if len(envs) != 1 { - t.Fatalf("Expected envs to contains one element, but was %v", envs) - } - - envs = osEnvLookup.Lookup("path", serviceName, config) - if len(envs) != 0 { - t.Fatalf("Expected envs to be empty, but was %v", envs) - } - - envs = osEnvLookup.Lookup("DOES_NOT_EXIST", serviceName, config) - if len(envs) != 0 { - t.Fatalf("Expected envs to be empty, but was %v", envs) - } -} diff --git a/vendor/github.com/docker/libcompose/package.go b/vendor/github.com/docker/libcompose/package.go deleted file mode 100644 index c49a9cc6..00000000 --- a/vendor/github.com/docker/libcompose/package.go +++ /dev/null @@ -1 +0,0 @@ -package libcompose diff --git a/vendor/github.com/docker/libcompose/docker/client_factory.go b/vendor/github.com/docker/libcompose/project/client_factory.go similarity index 54% rename from vendor/github.com/docker/libcompose/docker/client_factory.go rename to vendor/github.com/docker/libcompose/project/client_factory.go index ba7f2be8..141fa3bb 100644 --- a/vendor/github.com/docker/libcompose/docker/client_factory.go +++ b/vendor/github.com/docker/libcompose/project/client_factory.go @@ -1,8 +1,8 @@ -package docker +package project import ( - "github.com/docker/libcompose/project" - dockerclient "github.com/fsouza/go-dockerclient" + "github.com/docker/engine-api/client" + composeclient "github.com/docker/libcompose/docker/client" ) // ClientFactory is a factory to create docker clients. @@ -10,17 +10,17 @@ type ClientFactory interface { // Create constructs a Docker client for the given service. The passed in // config may be nil in which case a generic client for the project should // be returned. - Create(service project.Service) *dockerclient.Client + Create(service Service) client.APIClient } type defaultClientFactory struct { - client *dockerclient.Client + client client.APIClient } // NewDefaultClientFactory creates and returns the default client factory that uses -// github.com/samalba/dockerclient. -func NewDefaultClientFactory(opts ClientOpts) (ClientFactory, error) { - client, err := CreateClient(opts) +// github.com/docker/engine-api client. 
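ClientFactory is the seam for deciding how each service talks to Docker; the default factory hands every service one shared engine-api client. A sketch of a custom factory against the same interface, assuming the Service interface exposes Name() as the docker implementation above does (the override map is an invented illustration):

package project

import (
    "github.com/docker/engine-api/client"
)

// overrideClientFactory is a hypothetical ClientFactory that hands selected
// services a dedicated client (e.g. one pointed at another host) while
// everything else shares the default.
type overrideClientFactory struct {
    defaultClient client.APIClient
    overrides     map[string]client.APIClient // service name -> client
}

func (f *overrideClientFactory) Create(service Service) client.APIClient {
    if c, ok := f.overrides[service.Name()]; ok {
        return c
    }
    return f.defaultClient
}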
+func NewDefaultClientFactory(opts composeclient.Options) (ClientFactory, error) { + client, err := composeclient.Create(opts) if err != nil { return nil, err } @@ -30,6 +30,6 @@ func NewDefaultClientFactory(opts ClientOpts) (ClientFactory, error) { }, nil } -func (s *defaultClientFactory) Create(service project.Service) *dockerclient.Client { +func (s *defaultClientFactory) Create(service Service) client.APIClient { return s.client } diff --git a/vendor/github.com/docker/libcompose/project/container.go b/vendor/github.com/docker/libcompose/project/container.go new file mode 100644 index 00000000..f822c400 --- /dev/null +++ b/vendor/github.com/docker/libcompose/project/container.go @@ -0,0 +1,13 @@ +package project + +import ( + "golang.org/x/net/context" +) + +// Container defines what a libcompose container provides. +type Container interface { + ID() (string, error) + Name() string + Port(ctx context.Context, port string) (string, error) + IsRunning(ctx context.Context) (bool, error) +} diff --git a/vendor/github.com/docker/libcompose/project/context.go b/vendor/github.com/docker/libcompose/project/context.go index ba82c1d7..255b2d5b 100644 --- a/vendor/github.com/docker/libcompose/project/context.go +++ b/vendor/github.com/docker/libcompose/project/context.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/config" "github.com/docker/libcompose/logger" ) @@ -18,51 +19,47 @@ var projectRegexp = regexp.MustCompile("[^a-zA-Z0-9_.-]") // Context holds context meta information about a libcompose project, like // the project name, the compose file, etc. type Context struct { - Timeout uint - Log bool - Volume bool - ForceRecreate bool - NoRecreate bool - Signal int - ComposeFile string - ComposeBytes []byte + ComposeFiles []string + ComposeBytes [][]byte ProjectName string isOpen bool ServiceFactory ServiceFactory - EnvironmentLookup EnvironmentLookup - ConfigLookup ConfigLookup + EnvironmentLookup config.EnvironmentLookup + ResourceLookup config.ResourceLookup LoggerFactory logger.Factory IgnoreMissingConfig bool Project *Project } -func (c *Context) readComposeFile() error { +func (c *Context) readComposeFiles() error { if c.ComposeBytes != nil { return nil } - logrus.Debugf("Opening compose file: %s", c.ComposeFile) + logrus.Debugf("Opening compose files: %s", strings.Join(c.ComposeFiles, ",")) - if c.ComposeFile == "-" { + // Handle STDIN (`-f -`) + if len(c.ComposeFiles) == 1 && c.ComposeFiles[0] == "-" { composeBytes, err := ioutil.ReadAll(os.Stdin) if err != nil { logrus.Errorf("Failed to read compose file from stdin: %v", err) return err } - c.ComposeBytes = composeBytes - } else if c.ComposeFile != "" { - if composeBytes, err := ioutil.ReadFile(c.ComposeFile); os.IsNotExist(err) { - if c.IgnoreMissingConfig { - return nil - } - logrus.Errorf("Failed to find %s", c.ComposeFile) + c.ComposeBytes = [][]byte{composeBytes} + return nil + } + + for _, composeFile := range c.ComposeFiles { + composeBytes, err := ioutil.ReadFile(composeFile) + if err != nil && !os.IsNotExist(err) { + logrus.Errorf("Failed to open the compose file: %s", composeFile) return err - } else if err != nil { - logrus.Errorf("Failed to open %s", c.ComposeFile) - return err - } else { - c.ComposeBytes = composeBytes } + if err != nil && !c.IgnoreMissingConfig { + logrus.Errorf("Failed to find the compose file: %s", composeFile) + return err + } + c.ComposeBytes = append(c.ComposeBytes, composeBytes) } return nil @@ -74,16 +71,12 @@ func (c *Context) determineProject() 
error { return err } - c.ProjectName = projectRegexp.ReplaceAllString(strings.ToLower(name), "-") + c.ProjectName = normalizeName(name) if c.ProjectName == "" { return fmt.Errorf("Failed to determine project name") } - if strings.ContainsAny(c.ProjectName[0:1], "_.-") { - c.ProjectName = "x" + c.ProjectName - } - return nil } @@ -96,9 +89,14 @@ func (c *Context) lookupProjectName() (string, error) { return envProject, nil } - f, err := filepath.Abs(c.ComposeFile) + file := "." + if len(c.ComposeFiles) > 0 { + file = c.ComposeFiles[0] + } + + f, err := filepath.Abs(file) if err != nil { - logrus.Errorf("Failed to get absolute directory for: %s", c.ComposeFile) + logrus.Errorf("Failed to get absolute directory for: %s", file) return "", err } @@ -114,6 +112,11 @@ func (c *Context) lookupProjectName() (string, error) { } } +func normalizeName(name string) string { + r := regexp.MustCompile("[^a-z0-9]+") + return r.ReplaceAllString(strings.ToLower(name), "") +} + func toUnixPath(p string) string { return strings.Replace(p, "\\", "/", -1) } @@ -123,7 +126,7 @@ func (c *Context) open() error { return nil } - if err := c.readComposeFile(); err != nil { + if err := c.readComposeFiles(); err != nil { return err } diff --git a/vendor/github.com/docker/libcompose/project/empty.go b/vendor/github.com/docker/libcompose/project/empty.go index 0cad9c44..559b0270 100644 --- a/vendor/github.com/docker/libcompose/project/empty.go +++ b/vendor/github.com/docker/libcompose/project/empty.go @@ -1,70 +1,96 @@ package project +import ( + "golang.org/x/net/context" + + "github.com/docker/libcompose/project/options" +) + // EmptyService is a struct that implements Service but does nothing. type EmptyService struct { } // Create implements Service.Create but does nothing. -func (e *EmptyService) Create() error { +func (e *EmptyService) Create(ctx context.Context, options options.Create) error { return nil } // Build implements Service.Build but does nothing. -func (e *EmptyService) Build() error { +func (e *EmptyService) Build(ctx context.Context, buildOptions options.Build) error { return nil } // Up implements Service.Up but does nothing. -func (e *EmptyService) Up() error { +func (e *EmptyService) Up(ctx context.Context, options options.Up) error { return nil } // Start implements Service.Start but does nothing. -func (e *EmptyService) Start() error { +func (e *EmptyService) Start(ctx context.Context) error { return nil } -// Down implements Service.Down but does nothing. -func (e *EmptyService) Down() error { +// Stop implements Service.Stop but does nothing. +func (e *EmptyService) Stop(ctx context.Context, timeout int) error { return nil } // Delete implements Service.Delete but does nothing. -func (e *EmptyService) Delete() error { +func (e *EmptyService) Delete(ctx context.Context, options options.Delete) error { return nil } // Restart implements Service.Restart but does nothing. -func (e *EmptyService) Restart() error { +func (e *EmptyService) Restart(ctx context.Context, timeout int) error { return nil } // Log implements Service.Log but does nothing. -func (e *EmptyService) Log() error { +func (e *EmptyService) Log(ctx context.Context, follow bool) error { return nil } // Pull implements Service.Pull but does nothing. -func (e *EmptyService) Pull() error { +func (e *EmptyService) Pull(ctx context.Context) error { return nil } // Kill implements Service.Kill but does nothing. 
-func (e *EmptyService) Kill() error { +func (e *EmptyService) Kill(ctx context.Context, signal string) error { return nil } // Containers implements Service.Containers but does nothing. -func (e *EmptyService) Containers() ([]Container, error) { +func (e *EmptyService) Containers(ctx context.Context) ([]Container, error) { return []Container{}, nil } // Scale implements Service.Scale but does nothing. -func (e *EmptyService) Scale(count int) error { +func (e *EmptyService) Scale(ctx context.Context, count int, timeout int) error { return nil } // Info implements Service.Info but does nothing. -func (e *EmptyService) Info(qFlag bool) (InfoSet, error) { +func (e *EmptyService) Info(ctx context.Context, qFlag bool) (InfoSet, error) { return InfoSet{}, nil } + +// Pause implements Service.Pause but does nothing. +func (e *EmptyService) Pause(ctx context.Context) error { + return nil +} + +// Unpause implements Service.Unpause but does nothing. +func (e *EmptyService) Unpause(ctx context.Context) error { + return nil +} + +// Run implements Service.Run but does nothing. +func (e *EmptyService) Run(ctx context.Context, commandParts []string) (int, error) { + return 0, nil +} + +// RemoveImage implements Service.RemoveImage but does nothing. +func (e *EmptyService) RemoveImage(ctx context.Context, imageType options.ImageType) error { + return nil +} diff --git a/vendor/github.com/docker/libcompose/project/events/events.go b/vendor/github.com/docker/libcompose/project/events/events.go new file mode 100644 index 00000000..2181ffd8 --- /dev/null +++ b/vendor/github.com/docker/libcompose/project/events/events.go @@ -0,0 +1,197 @@ +// Package events holds event structures, methods and functions. +package events + +import ( + "fmt" +) + +// Notifier defines the methods an event notifier should have. +type Notifier interface { + Notify(eventType EventType, serviceName string, data map[string]string) +} + +// Emitter defines the methods an event emitter should have. +type Emitter interface { + AddListener(c chan<- Event) +} + +// Event holds project-wide event information. +type Event struct { + EventType EventType + ServiceName string + Data map[string]string +} + +// EventType defines a type of libcompose event. 
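The Emitter side above pushes every lifecycle transition onto registered listener channels; consuming them is just draining a channel passed to AddListener. A small helper sketch (the buffer size and log format are arbitrary):

package main

import (
    "fmt"

    "github.com/docker/libcompose/project/events"
)

// drain prints everything an events.Emitter publishes; any libcompose
// project satisfies the interface through AddListener.
func drain(emitter events.Emitter) {
    listener := make(chan events.Event, 16)
    emitter.AddListener(listener)
    go func() {
        for event := range listener {
            fmt.Printf("[%s] %s %v\n", event.ServiceName, event.EventType, event.Data)
        }
    }()
}

func main() {} // wiring a real project up is outside this sketch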
+type EventType int + +// Definitions of libcompose events +const ( + NoEvent = EventType(iota) + + ContainerCreated = EventType(iota) + ContainerStarted = EventType(iota) + + ServiceAdd = EventType(iota) + ServiceUpStart = EventType(iota) + ServiceUpIgnored = EventType(iota) + ServiceUp = EventType(iota) + ServiceCreateStart = EventType(iota) + ServiceCreate = EventType(iota) + ServiceDeleteStart = EventType(iota) + ServiceDelete = EventType(iota) + ServiceDownStart = EventType(iota) + ServiceDown = EventType(iota) + ServiceRestartStart = EventType(iota) + ServiceRestart = EventType(iota) + ServicePullStart = EventType(iota) + ServicePull = EventType(iota) + ServiceKillStart = EventType(iota) + ServiceKill = EventType(iota) + ServiceStartStart = EventType(iota) + ServiceStart = EventType(iota) + ServiceBuildStart = EventType(iota) + ServiceBuild = EventType(iota) + ServicePauseStart = EventType(iota) + ServicePause = EventType(iota) + ServiceUnpauseStart = EventType(iota) + ServiceUnpause = EventType(iota) + ServiceStopStart = EventType(iota) + ServiceStop = EventType(iota) + ServiceRunStart = EventType(iota) + ServiceRun = EventType(iota) + + VolumeAdd = EventType(iota) + NetworkAdd = EventType(iota) + + ProjectDownStart = EventType(iota) + ProjectDownDone = EventType(iota) + ProjectCreateStart = EventType(iota) + ProjectCreateDone = EventType(iota) + ProjectUpStart = EventType(iota) + ProjectUpDone = EventType(iota) + ProjectDeleteStart = EventType(iota) + ProjectDeleteDone = EventType(iota) + ProjectRestartStart = EventType(iota) + ProjectRestartDone = EventType(iota) + ProjectReload = EventType(iota) + ProjectReloadTrigger = EventType(iota) + ProjectKillStart = EventType(iota) + ProjectKillDone = EventType(iota) + ProjectStartStart = EventType(iota) + ProjectStartDone = EventType(iota) + ProjectBuildStart = EventType(iota) + ProjectBuildDone = EventType(iota) + ProjectPauseStart = EventType(iota) + ProjectPauseDone = EventType(iota) + ProjectUnpauseStart = EventType(iota) + ProjectUnpauseDone = EventType(iota) + ProjectStopStart = EventType(iota) + ProjectStopDone = EventType(iota) +) + +func (e EventType) String() string { + var m string + switch e { + case ContainerCreated: + m = "Created container" + case ContainerStarted: + m = "Started container" + + case ServiceAdd: + m = "Adding" + case ServiceUpStart: + m = "Starting" + case ServiceUpIgnored: + m = "Ignoring" + case ServiceUp: + m = "Started" + case ServiceCreateStart: + m = "Creating" + case ServiceCreate: + m = "Created" + case ServiceDeleteStart: + m = "Deleting" + case ServiceDelete: + m = "Deleted" + case ServiceStopStart: + m = "Stopping" + case ServiceStop: + m = "Stopped" + case ServiceDownStart: + m = "Stopping" + case ServiceDown: + m = "Stopped" + case ServiceRestartStart: + m = "Restarting" + case ServiceRestart: + m = "Restarted" + case ServicePullStart: + m = "Pulling" + case ServicePull: + m = "Pulled" + case ServiceKillStart: + m = "Killing" + case ServiceKill: + m = "Killed" + case ServiceStartStart: + m = "Starting" + case ServiceStart: + m = "Started" + case ServiceBuildStart: + m = "Building" + case ServiceBuild: + m = "Built" + case ServiceRunStart: + m = "Executing" + case ServiceRun: + m = "Executed" + + case ProjectDownStart: + m = "Stopping project" + case ProjectDownDone: + m = "Project stopped" + case ProjectStopStart: + m = "Stopping project" + case ProjectStopDone: + m = "Project stopped" + case ProjectCreateStart: + m = "Creating project" + case ProjectCreateDone: + m = "Project created" + case 
ProjectUpStart: + m = "Starting project" + case ProjectUpDone: + m = "Project started" + case ProjectDeleteStart: + m = "Deleting project" + case ProjectDeleteDone: + m = "Project deleted" + case ProjectRestartStart: + m = "Restarting project" + case ProjectRestartDone: + m = "Project restarted" + case ProjectReload: + m = "Reloading project" + case ProjectReloadTrigger: + m = "Triggering project reload" + case ProjectKillStart: + m = "Killing project" + case ProjectKillDone: + m = "Project killed" + case ProjectStartStart: + m = "Starting project" + case ProjectStartDone: + m = "Project started" + case ProjectBuildStart: + m = "Building project" + case ProjectBuildDone: + m = "Project built" + } + + if m == "" { + m = fmt.Sprintf("EventType: %d", int(e)) + } + + return m +} diff --git a/vendor/github.com/docker/libcompose/project/info.go b/vendor/github.com/docker/libcompose/project/info.go index ed799ad9..6689c77e 100644 --- a/vendor/github.com/docker/libcompose/project/info.go +++ b/vendor/github.com/docker/libcompose/project/info.go @@ -6,6 +6,17 @@ import ( "text/tabwriter" ) +// InfoPart holds key/value strings. +type InfoPart struct { + Key, Value string +} + +// InfoSet holds a list of Info. +type InfoSet []Info + +// Info holds a list of InfoPart. +type Info []InfoPart + func (infos InfoSet) String(titleFlag bool) string { //no error checking, none of this should fail buffer := bytes.NewBuffer(make([]byte, 0, 1024)) diff --git a/vendor/github.com/docker/libcompose/project/interface.go b/vendor/github.com/docker/libcompose/project/interface.go new file mode 100644 index 00000000..70cfc53f --- /dev/null +++ b/vendor/github.com/docker/libcompose/project/interface.go @@ -0,0 +1,39 @@ +package project + +import ( + "golang.org/x/net/context" + + "github.com/docker/libcompose/config" + "github.com/docker/libcompose/project/events" + "github.com/docker/libcompose/project/options" +) + +// APIProject is an interface defining the methods a libcompose project should implement. +type APIProject interface { + events.Notifier + events.Emitter + + Build(ctx context.Context, options options.Build, services ...string) error + Create(ctx context.Context, options options.Create, services ...string) error + Delete(ctx context.Context, options options.Delete, services ...string) error + Down(ctx context.Context, options options.Down, services ...string) error + Kill(ctx context.Context, signal string, services ...string) error + Log(ctx context.Context, follow bool, services ...string) error + Pause(ctx context.Context, services ...string) error + Ps(ctx context.Context, onlyID bool, services ...string) (InfoSet, error) + // FIXME(vdemeester) we could use nat.Port instead ? 
+ Port(ctx context.Context, index int, protocol, serviceName, privatePort string) (string, error) + Pull(ctx context.Context, services ...string) error + Restart(ctx context.Context, timeout int, services ...string) error + Run(ctx context.Context, serviceName string, commandParts []string) (int, error) + Scale(ctx context.Context, timeout int, servicesScale map[string]int) error + Start(ctx context.Context, services ...string) error + Stop(ctx context.Context, timeout int, services ...string) error + Unpause(ctx context.Context, services ...string) error + Up(ctx context.Context, options options.Up, services ...string) error + + Parse() error + CreateService(name string) (Service, error) + AddConfig(name string, config *config.ServiceConfig) error + Load(bytes []byte) error +} diff --git a/vendor/github.com/docker/libcompose/project/interpolation_test.go b/vendor/github.com/docker/libcompose/project/interpolation_test.go deleted file mode 100644 index d25a974e..00000000 --- a/vendor/github.com/docker/libcompose/project/interpolation_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package project - -import ( - "fmt" - "os" - "testing" - - yaml "github.com/cloudfoundry-incubator/candiedyaml" - "github.com/stretchr/testify/assert" -) - -func testInterpolatedLine(t *testing.T, expectedLine, interpolatedLine string, envVariables map[string]string) { - interpolatedLine, _ = parseLine(interpolatedLine, func(s string) string { - return envVariables[s] - }) - - assert.Equal(t, expectedLine, interpolatedLine) -} - -func testInvalidInterpolatedLine(t *testing.T, line string) { - _, success := parseLine(line, func(string) string { - return "" - }) - - assert.Equal(t, false, success) -} - -func TestParseLine(t *testing.T) { - variables := map[string]string{ - "A": "ABC", - "X": "XYZ", - "E": "", - "lower": "WORKED", - "MiXeD": "WORKED", - "split_VaLue": "WORKED", - "9aNumber": "WORKED", - "a9Number": "WORKED", - } - - testInterpolatedLine(t, "WORKED", "$lower", variables) - testInterpolatedLine(t, "WORKED", "${MiXeD}", variables) - testInterpolatedLine(t, "WORKED", "${split_VaLue}", variables) - // Starting with a number isn't valid - testInterpolatedLine(t, "", "$9aNumber", variables) - testInterpolatedLine(t, "WORKED", "$a9Number", variables) - - testInterpolatedLine(t, "ABC", "$A", variables) - testInterpolatedLine(t, "ABC", "${A}", variables) - - testInterpolatedLine(t, "ABC DE", "$A DE", variables) - testInterpolatedLine(t, "ABCDE", "${A}DE", variables) - - testInterpolatedLine(t, "$A", "$$A", variables) - testInterpolatedLine(t, "${A}", "$${A}", variables) - - testInterpolatedLine(t, "$ABC", "$$${A}", variables) - testInterpolatedLine(t, "$ABC", "$$$A", variables) - - testInterpolatedLine(t, "ABC XYZ", "$A $X", variables) - testInterpolatedLine(t, "ABCXYZ", "$A$X", variables) - testInterpolatedLine(t, "ABCXYZ", "${A}${X}", variables) - - testInterpolatedLine(t, "", "$B", variables) - testInterpolatedLine(t, "", "${B}", variables) - testInterpolatedLine(t, "", "$ADE", variables) - - testInterpolatedLine(t, "", "$E", variables) - testInterpolatedLine(t, "", "${E}", variables) - - testInvalidInterpolatedLine(t, "${") - testInvalidInterpolatedLine(t, "$}") - testInvalidInterpolatedLine(t, "${}") - testInvalidInterpolatedLine(t, "${ }") - testInvalidInterpolatedLine(t, "${A }") - testInvalidInterpolatedLine(t, "${ A}") - testInvalidInterpolatedLine(t, "${A!}") - testInvalidInterpolatedLine(t, "$!") -} - -type MockEnvironmentLookup struct { - Variables map[string]string -} - -func (m MockEnvironmentLookup) 
Lookup(key, serviceName string, config *ServiceConfig) []string { - return []string{fmt.Sprintf("%s=%s", key, m.Variables[key])} -} - -func testInterpolatedConfig(t *testing.T, expectedConfig, interpolatedConfig string, envVariables map[string]string) { - for k, v := range envVariables { - os.Setenv(k, v) - } - - expectedConfigBytes := []byte(expectedConfig) - interpolatedConfigBytes := []byte(interpolatedConfig) - - expectedData := make(rawServiceMap) - interpolatedData := make(rawServiceMap) - - yaml.Unmarshal(expectedConfigBytes, &expectedData) - yaml.Unmarshal(interpolatedConfigBytes, &interpolatedData) - - _ = interpolate(MockEnvironmentLookup{envVariables}, &interpolatedData) - - for k := range envVariables { - os.Unsetenv(k) - } - - assert.Equal(t, expectedData, interpolatedData) -} - -func testInvalidInterpolatedConfig(t *testing.T, interpolatedConfig string) { - interpolatedConfigBytes := []byte(interpolatedConfig) - interpolatedData := make(rawServiceMap) - yaml.Unmarshal(interpolatedConfigBytes, &interpolatedData) - - err := interpolate(new(MockEnvironmentLookup), &interpolatedData) - - assert.NotNil(t, err) -} - -func TestInterpolate(t *testing.T) { - testInterpolatedConfig(t, - `web: - # unbracketed name - image: busybox - - # array element - ports: - - "80:8000" - - # dictionary item value - labels: - mylabel: "myvalue" - - # unset value - hostname: "host-" - - # escaped interpolation - command: "${ESCAPED}"`, - `web: - # unbracketed name - image: $IMAGE - - # array element - ports: - - "${HOST_PORT}:8000" - - # dictionary item value - labels: - mylabel: "${LABEL_VALUE}" - - # unset value - hostname: "host-${UNSET_VALUE}" - - # escaped interpolation - command: "$${ESCAPED}"`, map[string]string{ - "IMAGE": "busybox", - "HOST_PORT": "80", - "LABEL_VALUE": "myvalue", - }) - - // Same as above, but testing with equal signs in variables - testInterpolatedConfig(t, - `web: - # unbracketed name - image: =busybox - - # array element - ports: - - "=:8000" - - # dictionary item value - labels: - mylabel: "myvalue==" - - # unset value - hostname: "host-" - - # escaped interpolation - command: "${ESCAPED}"`, - `web: - # unbracketed name - image: $IMAGE - - # array element - ports: - - "${HOST_PORT}:8000" - - # dictionary item value - labels: - mylabel: "${LABEL_VALUE}" - - # unset value - hostname: "host-${UNSET_VALUE}" - - # escaped interpolation - command: "$${ESCAPED}"`, map[string]string{ - "IMAGE": "=busybox", - "HOST_PORT": "=", - "LABEL_VALUE": "myvalue==", - }) - - testInvalidInterpolatedConfig(t, - `web: - image: "${"`) - - testInvalidInterpolatedConfig(t, - `web: - image: busybox - - # array element - ports: - - "${}:8000"`) - - testInvalidInterpolatedConfig(t, - `web: - image: busybox - - # array element - ports: - - "80:8000" - - # dictionary item value - labels: - mylabel: "${ LABEL_VALUE}"`) -} diff --git a/vendor/github.com/docker/libcompose/project/listener.go b/vendor/github.com/docker/libcompose/project/listener.go index 5424b028..bb0fc2e5 100644 --- a/vendor/github.com/docker/libcompose/project/listener.go +++ b/vendor/github.com/docker/libcompose/project/listener.go @@ -4,39 +4,40 @@ import ( "bytes" "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/project/events" ) var ( - infoEvents = map[EventType]bool{ - EventProjectDeleteDone: true, - EventProjectDeleteStart: true, - EventProjectDownDone: true, - EventProjectDownStart: true, - EventProjectRestartDone: true, - EventProjectRestartStart: true, - EventProjectUpDone: true, - EventProjectUpStart: true, - 
EventServiceDeleteStart: true, - EventServiceDelete: true, - EventServiceDownStart: true, - EventServiceDown: true, - EventServiceRestartStart: true, - EventServiceRestart: true, - EventServiceUpStart: true, - EventServiceUp: true, + infoEvents = map[events.EventType]bool{ + events.ProjectDeleteDone: true, + events.ProjectDeleteStart: true, + events.ProjectDownDone: true, + events.ProjectDownStart: true, + events.ProjectRestartDone: true, + events.ProjectRestartStart: true, + events.ProjectUpDone: true, + events.ProjectUpStart: true, + events.ServiceDeleteStart: true, + events.ServiceDelete: true, + events.ServiceDownStart: true, + events.ServiceDown: true, + events.ServiceRestartStart: true, + events.ServiceRestart: true, + events.ServiceUpStart: true, + events.ServiceUp: true, } ) type defaultListener struct { project *Project - listenChan chan Event + listenChan chan events.Event upCount int } // NewDefaultListener create a default listener for the specified project. -func NewDefaultListener(p *Project) chan<- Event { +func NewDefaultListener(p *Project) chan<- events.Event { l := defaultListener{ - listenChan: make(chan Event), + listenChan: make(chan events.Event), project: p, } go l.start() @@ -57,7 +58,7 @@ func (d *defaultListener) start() { } } - if event.EventType == EventServiceUp { + if event.EventType == events.ServiceUp { d.upCount++ } @@ -70,7 +71,7 @@ func (d *defaultListener) start() { if event.ServiceName == "" { logf("Project [%s]: %s %s", d.project.Name, event.EventType, buffer.Bytes()) } else { - logf("[%d/%d] [%s]: %s %s", d.upCount, len(d.project.Configs), event.ServiceName, event.EventType, buffer.Bytes()) + logf("[%d/%d] [%s]: %s %s", d.upCount, d.project.ServiceConfigs.Len(), event.ServiceName, event.EventType, buffer.Bytes()) } } } diff --git a/vendor/github.com/docker/libcompose/project/merge.go b/vendor/github.com/docker/libcompose/project/merge.go deleted file mode 100644 index 9bf5c420..00000000 --- a/vendor/github.com/docker/libcompose/project/merge.go +++ /dev/null @@ -1,299 +0,0 @@ -package project - -import ( - "bufio" - "bytes" - "fmt" - "path" - "strings" - - "github.com/Sirupsen/logrus" - yaml "github.com/cloudfoundry-incubator/candiedyaml" - "github.com/docker/libcompose/utils" -) - -var ( - // ValidRemotes list the of valid prefixes that can be sent to Docker as a build remote location - // This is public for consumers of libcompose to use - ValidRemotes = []string{ - "git://", - "git@github.com:", - "github.com", - "http:", - "https:", - } - noMerge = []string{ - "links", - "volumes_from", - } -) - -type rawService map[string]interface{} -type rawServiceMap map[string]rawService - -func mergeProject(p *Project, bytes []byte) (map[string]*ServiceConfig, error) { - configs := make(map[string]*ServiceConfig) - - datas := make(rawServiceMap) - if err := yaml.Unmarshal(bytes, &datas); err != nil { - return nil, err - } - - if err := interpolate(p.context.EnvironmentLookup, &datas); err != nil { - return nil, err - } - - for name, data := range datas { - data, err := parse(p.context.ConfigLookup, p.context.EnvironmentLookup, p.File, data, datas) - if err != nil { - logrus.Errorf("Failed to parse service %s: %v", name, err) - return nil, err - } - - datas[name] = data - } - - if err := utils.Convert(datas, &configs); err != nil { - return nil, err - } - - adjustValues(configs) - return configs, nil -} - -func adjustValues(configs map[string]*ServiceConfig) { - // yaml parser turns "no" into "false" but that is not valid for a restart policy - for _, v := 
range configs { - if v.Restart == "false" { - v.Restart = "no" - } - } -} - -func readEnvFile(configLookup ConfigLookup, inFile string, serviceData rawService) (rawService, error) { - var config ServiceConfig - - if err := utils.Convert(serviceData, &config); err != nil { - return nil, err - } - - if len(config.EnvFile.Slice()) == 0 { - return serviceData, nil - } - - if configLookup == nil { - return nil, fmt.Errorf("Can not use env_file in file %s no mechanism provided to load files", inFile) - } - - vars := config.Environment.Slice() - - for i := len(config.EnvFile.Slice()) - 1; i >= 0; i-- { - envFile := config.EnvFile.Slice()[i] - content, _, err := configLookup.Lookup(envFile, inFile) - if err != nil { - return nil, err - } - - if err != nil { - return nil, err - } - - scanner := bufio.NewScanner(bytes.NewBuffer(content)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - key := strings.SplitAfter(line, "=")[0] - - found := false - for _, v := range vars { - if strings.HasPrefix(v, key) { - found = true - break - } - } - - if !found { - vars = append(vars, line) - } - } - - if scanner.Err() != nil { - return nil, scanner.Err() - } - } - - serviceData["environment"] = vars - - delete(serviceData, "env_file") - - return serviceData, nil -} - -func resolveBuild(inFile string, serviceData rawService) (rawService, error) { - - build := asString(serviceData["build"]) - if build == "" { - return serviceData, nil - } - - for _, remote := range ValidRemotes { - if strings.HasPrefix(build, remote) { - return serviceData, nil - } - } - - current := path.Dir(inFile) - - if build == "." { - build = current - } else { - current = path.Join(current, build) - } - - serviceData["build"] = current - - return serviceData, nil -} - -func parse(configLookup ConfigLookup, environmentLookup EnvironmentLookup, inFile string, serviceData rawService, datas rawServiceMap) (rawService, error) { - serviceData, err := readEnvFile(configLookup, inFile, serviceData) - if err != nil { - return nil, err - } - - serviceData, err = resolveBuild(inFile, serviceData) - if err != nil { - return nil, err - } - - value, ok := serviceData["extends"] - if !ok { - return serviceData, nil - } - - mapValue, ok := value.(map[interface{}]interface{}) - if !ok { - return serviceData, nil - } - - if configLookup == nil { - return nil, fmt.Errorf("Can not use extends in file %s no mechanism provided to files", inFile) - } - - file := asString(mapValue["file"]) - service := asString(mapValue["service"]) - - if service == "" { - return serviceData, nil - } - - var baseService rawService - - if file == "" { - if serviceData, ok := datas[service]; ok { - baseService, err = parse(configLookup, environmentLookup, inFile, serviceData, datas) - } else { - return nil, fmt.Errorf("Failed to find service %s to extend", service) - } - } else { - bytes, resolved, err := configLookup.Lookup(file, inFile) - if err != nil { - logrus.Errorf("Failed to lookup file %s: %v", file, err) - return nil, err - } - - var baseRawServices rawServiceMap - if err := yaml.Unmarshal(bytes, &baseRawServices); err != nil { - return nil, err - } - - err = interpolate(environmentLookup, &baseRawServices) - if err != nil { - return nil, err - } - - baseService, ok = baseRawServices[service] - if !ok { - return nil, fmt.Errorf("Failed to find service %s in file %s", service, file) - } - - baseService, err = parse(configLookup, environmentLookup, resolved, baseService, baseRawServices) - } - - if err != nil { - return nil, err - } - - baseService = 
clone(baseService) - - logrus.Debugf("Merging %#v, %#v", baseService, serviceData) - - for _, k := range noMerge { - if _, ok := baseService[k]; ok { - source := file - if source == "" { - source = inFile - } - return nil, fmt.Errorf("Cannot extend service '%s' in %s: services with '%s' cannot be extended", service, source, k) - } - } - - for k, v := range serviceData { - // Image and build are mutually exclusive in merge - if k == "image" { - delete(baseService, "build") - } else if k == "build" { - delete(baseService, "image") - } - existing, ok := baseService[k] - if ok { - baseService[k] = merge(existing, v) - } else { - baseService[k] = v - } - } - - logrus.Debugf("Merged result %#v", baseService) - - return baseService, nil -} - -func merge(existing, value interface{}) interface{} { - // append strings - if left, lok := existing.([]interface{}); lok { - if right, rok := value.([]interface{}); rok { - return append(left, right...) - } - } - - //merge maps - if left, lok := existing.(map[interface{}]interface{}); lok { - if right, rok := value.(map[interface{}]interface{}); rok { - newLeft := make(map[interface{}]interface{}) - for k, v := range left { - newLeft[k] = v - } - for k, v := range right { - newLeft[k] = v - } - return newLeft - } - } - - return value -} - -func clone(in rawService) rawService { - result := rawService{} - for k, v := range in { - result[k] = v - } - - return result -} - -func asString(obj interface{}) string { - if v, ok := obj.(string); ok { - return v - } - return "" -} diff --git a/vendor/github.com/docker/libcompose/project/merge_test.go b/vendor/github.com/docker/libcompose/project/merge_test.go deleted file mode 100644 index 3ffe656a..00000000 --- a/vendor/github.com/docker/libcompose/project/merge_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package project - -import "testing" - -type NullLookup struct { -} - -func (n *NullLookup) Lookup(file, relativeTo string) ([]byte, string, error) { - return nil, "", nil -} - -func TestExtendsInheritImage(t *testing.T) { - p := NewProject(&Context{ - ConfigLookup: &NullLookup{}, - }) - - config, err := mergeProject(p, []byte(` -parent: - image: foo -child: - extends: - service: parent -`)) - - if err != nil { - t.Fatal(err) - } - - parent := config["parent"] - child := config["child"] - - if parent.Image != "foo" { - t.Fatal("Invalid image", parent.Image) - } - - if child.Build != "" { - t.Fatal("Invalid build", child.Build) - } - - if child.Image != "foo" { - t.Fatal("Invalid image", child.Image) - } -} - -func TestExtendsInheritBuild(t *testing.T) { - p := NewProject(&Context{ - ConfigLookup: &NullLookup{}, - }) - - config, err := mergeProject(p, []byte(` -parent: - build: . -child: - extends: - service: parent -`)) - - if err != nil { - t.Fatal(err) - } - - parent := config["parent"] - child := config["child"] - - if parent.Build != "." { - t.Fatal("Invalid build", parent.Build) - } - - if child.Build != "." { - t.Fatal("Invalid build", child.Build) - } - - if child.Image != "" { - t.Fatal("Invalid image", child.Image) - } -} - -func TestExtendBuildOverImage(t *testing.T) { - p := NewProject(&Context{ - ConfigLookup: &NullLookup{}, - }) - - config, err := mergeProject(p, []byte(` -parent: - image: foo -child: - build: . - extends: - service: parent -`)) - - if err != nil { - t.Fatal(err) - } - - parent := config["parent"] - child := config["child"] - - if parent.Image != "foo" { - t.Fatal("Invalid image", parent.Image) - } - - if child.Build != "." 
{ - t.Fatal("Invalid build", child.Build) - } - - if child.Image != "" { - t.Fatal("Invalid image", child.Image) - } -} - -func TestExtendImageOverBuild(t *testing.T) { - p := NewProject(&Context{ - ConfigLookup: &NullLookup{}, - }) - - config, err := mergeProject(p, []byte(` -parent: - build: . -child: - image: foo - extends: - service: parent -`)) - - if err != nil { - t.Fatal(err) - } - - parent := config["parent"] - child := config["child"] - - if parent.Image != "" { - t.Fatal("Invalid image", parent.Image) - } - - if parent.Build != "." { - t.Fatal("Invalid build", parent.Build) - } - - if child.Build != "" { - t.Fatal("Invalid build", child.Build) - } - - if child.Image != "foo" { - t.Fatal("Invalid image", child.Image) - } -} - -func TestRestartNo(t *testing.T) { - p := NewProject(&Context{ - ConfigLookup: &NullLookup{}, - }) - - config, err := mergeProject(p, []byte(` -test: - restart: no - image: foo -`)) - - if err != nil { - t.Fatal(err) - } - - test := config["test"] - - if test.Restart != "no" { - t.Fatal("Invalid restart policy", test.Restart) - } -} - -func TestRestartAlways(t *testing.T) { - p := NewProject(&Context{ - ConfigLookup: &NullLookup{}, - }) - - config, err := mergeProject(p, []byte(` -test: - restart: always - image: foo -`)) - - if err != nil { - t.Fatal(err) - } - - test := config["test"] - - if test.Restart != "always" { - t.Fatal("Invalid restart policy", test.Restart) - } -} diff --git a/vendor/github.com/docker/libcompose/project/options/types.go b/vendor/github.com/docker/libcompose/project/options/types.go new file mode 100644 index 00000000..f826f166 --- /dev/null +++ b/vendor/github.com/docker/libcompose/project/options/types.go @@ -0,0 +1,48 @@ +package options + +// Build holds options of compose build. +type Build struct { + NoCache bool + ForceRemove bool + Pull bool +} + +// Delete holds options of compose rm. +type Delete struct { + RemoveVolume bool + BeforeDeleteCallback func([]string) bool +} + +// Down holds options of compose down. +type Down struct { + RemoveVolume bool + RemoveImages ImageType + RemoveOrphans bool +} + +// Create holds options of compose create. +type Create struct { + NoRecreate bool + ForceRecreate bool + NoBuild bool + // ForceBuild bool +} + +// Up holds options of compose up. +type Up struct { + Create + Log bool +} + +// ImageType defines the type of image (local, all) +type ImageType string + +// Valid indicates whether the image type is valid. +func (i ImageType) Valid() bool { + switch string(i) { + case "", "local", "all": + return true + default: + return false + } +} diff --git a/vendor/github.com/docker/libcompose/project/project.go b/vendor/github.com/docker/libcompose/project/project.go index 870b791d..033a690f 100644 --- a/vendor/github.com/docker/libcompose/project/project.go +++ b/vendor/github.com/docker/libcompose/project/project.go @@ -5,41 +5,49 @@ import ( "fmt" "strings" + "golang.org/x/net/context" + log "github.com/Sirupsen/logrus" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/libcompose/config" + "github.com/docker/libcompose/labels" "github.com/docker/libcompose/logger" + "github.com/docker/libcompose/project/events" + "github.com/docker/libcompose/project/options" "github.com/docker/libcompose/utils" ) -// ServiceState holds the state of a service. 
-type ServiceState string - -// State definitions -var ( - StateExecuted = ServiceState("executed") - StateUnknown = ServiceState("unknown") -) - -// Error definitions -var ( - ErrRestart = errors.New("Restart execution") - ErrUnsupported = errors.New("UnsupportedOperation") -) - -// Event holds project-wide event informations. -type Event struct { - EventType EventType - ServiceName string - Data map[string]string -} - type wrapperAction func(*serviceWrapper, map[string]*serviceWrapper) type serviceAction func(service Service) error -// NewProject create a new project with the specified context. -func NewProject(context *Context) *Project { +// Project holds libcompose project information. +type Project struct { + Name string + ServiceConfigs *config.ServiceConfigs + VolumeConfigs map[string]*config.VolumeConfig + NetworkConfigs map[string]*config.NetworkConfig + Files []string + ReloadCallback func() error + ParseOptions *config.ParseOptions + + context *Context + clientFactory ClientFactory + reload []string + upCount int + listeners []chan<- events.Event + hasListeners bool +} + +// NewProject creates a new project with the specified context. +func NewProject(clientFactory ClientFactory, context *Context, parseOptions *config.ParseOptions) *Project { p := &Project{ - context: context, - Configs: make(map[string]*ServiceConfig), + context: context, + clientFactory: clientFactory, + ParseOptions: parseOptions, + ServiceConfigs: config.NewServiceConfigs(), + VolumeConfigs: make(map[string]*config.VolumeConfig), + NetworkConfigs: make(map[string]*config.NetworkConfig), } if context.LoggerFactory == nil { @@ -48,7 +56,7 @@ func NewProject(context *Context) *Project { context.Project = p - p.listeners = []chan<- Event{NewDefaultListener(p)} + p.listeners = []chan<- events.Event{NewDefaultListener(p)} return p } @@ -63,23 +71,31 @@ func (p *Project) Parse() error { p.Name = p.context.ProjectName - if p.context.ComposeFile == "-" { - p.File = "." - } else { - p.File = p.context.ComposeFile + p.Files = p.context.ComposeFiles + + if len(p.Files) == 1 && p.Files[0] == "-" { + p.Files = []string{"."} } if p.context.ComposeBytes != nil { - return p.Load(p.context.ComposeBytes) + for i, composeBytes := range p.context.ComposeBytes { + file := "" + if i < len(p.context.ComposeFiles) { + file = p.Files[i] + } + if err := p.load(file, composeBytes); err != nil { + return err + } + } } return nil } -// CreateService creates a service with the specified name based. It there +// CreateService creates a service with the specified name based. If there // is no config in the project for this service, it will return an error. 
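CreateService (below) copies the service config and resolves bare environment keys through the context's EnvironmentLookup. A minimal sketch of that contract, assuming the config-package lookup keeps the Lookup(key, serviceName, config) shape seen elsewhere in this diff; staticLookup and its return value are invented for illustration:

type staticLookup struct{}

// Lookup resolves a bare "KEY" entry into one or more "KEY=value" pairs.
func (staticLookup) Lookup(key, serviceName string, cfg *config.ServiceConfig) []string {
	return []string{key + "=resolved"}
}

// Given Environment: []string{"A=B", "C"}, the loop in CreateService keeps
// "A=B" untouched and expands the bare "C", yielding {"A=B", "C=resolved"}.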
func (p *Project) CreateService(name string) (Service, error) { - existing, ok := p.Configs[name] + existing, ok := p.ServiceConfigs.Get(name) if !ok { return nil, fmt.Errorf("Failed to find service: %s", name) } @@ -88,9 +104,9 @@ func (p *Project) CreateService(name string) (Service, error) { config := *existing if p.context.EnvironmentLookup != nil { - parsedEnv := make([]string, 0, len(config.Environment.Slice())) + parsedEnv := make([]string, 0, len(config.Environment)) - for _, env := range config.Environment.Slice() { + for _, env := range config.Environment { parts := strings.SplitN(env, "=", 2) if len(parts) > 1 && parts[1] != "" { parsedEnv = append(parsedEnv, env) @@ -104,39 +120,71 @@ func (p *Project) CreateService(name string) (Service, error) { } } - config.Environment = NewMaporEqualSlice(parsedEnv) + config.Environment = parsedEnv } return p.context.ServiceFactory.Create(p, name, &config) } // AddConfig adds the specified service config for the specified name. -func (p *Project) AddConfig(name string, config *ServiceConfig) error { - p.Notify(EventServiceAdd, name, nil) +func (p *Project) AddConfig(name string, config *config.ServiceConfig) error { + p.Notify(events.ServiceAdd, name, nil) - p.Configs[name] = config + p.ServiceConfigs.Add(name, config) p.reload = append(p.reload, name) return nil } +// AddVolumeConfig adds the specified volume config for the specified name. +func (p *Project) AddVolumeConfig(name string, config *config.VolumeConfig) error { + p.Notify(events.VolumeAdd, name, nil) + p.VolumeConfigs[name] = config + return nil +} + +// AddNetworkConfig adds the specified network config for the specified name. +func (p *Project) AddNetworkConfig(name string, config *config.NetworkConfig) error { + p.Notify(events.NetworkAdd, name, nil) + p.NetworkConfigs[name] = config + return nil +} + // Load loads the specified byte array (the composefile content) and adds the // service configuration to the project. +// FIXME is it needed ? func (p *Project) Load(bytes []byte) error { - configs := make(map[string]*ServiceConfig) - configs, err := mergeProject(p, bytes) + return p.load("", bytes) +} + +func (p *Project) load(file string, bytes []byte) error { + serviceConfigs, volumeConfigs, networkConfigs, err := config.Merge(p.ServiceConfigs, p.context.EnvironmentLookup, p.context.ResourceLookup, file, bytes, p.ParseOptions) if err != nil { log.Errorf("Could not parse config for project %s : %v", p.Name, err) return err } - for name, config := range configs { + for name, config := range serviceConfigs { err := p.AddConfig(name, config) if err != nil { return err } } + for name, config := range volumeConfigs { + err := p.AddVolumeConfig(name, config) + if err != nil { + return err + } + } + + for name, config := range networkConfigs { + err := p.AddNetworkConfig(name, config) + if err != nil { + return err + } + } + return nil } @@ -153,98 +201,314 @@ func (p *Project) loadWrappers(wrappers map[string]*serviceWrapper, servicesToCo } // Build builds the specified services (like docker build). 
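Build's new signature threads a context and an options.Build (declared in project/options/types.go above) down to each service. A hedged usage sketch, with p an existing *Project and the golang.org/x/net/context import used throughout this file:

err := p.Build(context.Background(), options.Build{
	NoCache: true, // roughly docker build --no-cache
	Pull:    true, // attempt to pull a newer base image first
}, "web")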
-func (p *Project) Build(services ...string) error {
-	return p.perform(EventProjectBuildStart, EventProjectBuildDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
-		wrapper.Do(wrappers, EventServiceBuildStart, EventServiceBuild, func(service Service) error {
-			return service.Build()
+func (p *Project) Build(ctx context.Context, buildOptions options.Build, services ...string) error {
+	return p.perform(events.ProjectBuildStart, events.ProjectBuildDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(wrappers, events.ServiceBuildStart, events.ServiceBuild, func(service Service) error {
+			return service.Build(ctx, buildOptions)
 		})
 	}), nil)
 }
 
 // Create creates the specified services (like docker create).
-func (p *Project) Create(services ...string) error {
-	return p.perform(EventProjectCreateStart, EventProjectCreateDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
-		wrapper.Do(wrappers, EventServiceCreateStart, EventServiceCreate, func(service Service) error {
-			return service.Create()
+func (p *Project) Create(ctx context.Context, options options.Create, services ...string) error {
+	if options.NoRecreate && options.ForceRecreate {
+		return fmt.Errorf("no-recreate and force-recreate cannot be combined")
+	}
+	return p.perform(events.ProjectCreateStart, events.ProjectCreateDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(wrappers, events.ServiceCreateStart, events.ServiceCreate, func(service Service) error {
+			return service.Create(ctx, options)
 		})
 	}), nil)
 }
 
-// Down stops the specified services (like docker stop).
-func (p *Project) Down(services ...string) error {
-	return p.perform(EventProjectDownStart, EventProjectDownDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
-		wrapper.Do(nil, EventServiceDownStart, EventServiceDown, func(service Service) error {
-			return service.Down()
+// Stop stops the specified services (like docker stop).
+func (p *Project) Stop(ctx context.Context, timeout int, services ...string) error {
+	return p.perform(events.ProjectStopStart, events.ProjectStopDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(nil, events.ServiceStopStart, events.ServiceStop, func(service Service) error {
+			return service.Stop(ctx, timeout)
 		})
 	}), nil)
 }
 
-// Restart restarts the specified services (like docker restart).
-func (p *Project) Restart(services ...string) error {
-	return p.perform(EventProjectRestartStart, EventProjectRestartDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
-		wrapper.Do(wrappers, EventServiceRestartStart, EventServiceRestart, func(service Service) error {
-			return service.Restart()
-		})
-	}), nil)
-}
+// Down stops the specified services and cleans related containers (like docker stop + docker rm).
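As the body below shows, Down chains Stop (with a fixed 10-second timeout), optional orphan removal, Delete, and image removal, and RemoveImages must pass ImageType.Valid(). A sketch; the service names are illustrative:

err := p.Down(context.Background(), options.Down{
	RemoveVolume:  true,    // also remove volumes, like docker-compose down -v
	RemoveImages:  "local", // only "", "local" and "all" are accepted
	RemoveOrphans: true,    // kill and remove containers no longer in the config
}, "web", "db")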
+func (p *Project) Down(ctx context.Context, opts options.Down, services ...string) error { + if !opts.RemoveImages.Valid() { + return fmt.Errorf("--rmi flag must be local, all or empty") + } + if err := p.Stop(ctx, 10, services...); err != nil { + return err + } + if opts.RemoveOrphans { + if err := p.removeOrphanContainers(); err != nil { + return err + } + } + if err := p.Delete(ctx, options.Delete{ + RemoveVolume: opts.RemoveVolume, + }, services...); err != nil { + return err + } -// Start starts the specified services (like docker start). -func (p *Project) Start(services ...string) error { - return p.perform(EventProjectStartStart, EventProjectStartDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Do(wrappers, EventServiceStartStart, EventServiceStart, func(service Service) error { - return service.Start() - }) - }), nil) -} - -// Up create and start the specified services (kinda like docker run). -func (p *Project) Up(services ...string) error { - return p.perform(EventProjectUpStart, EventProjectUpDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Do(wrappers, EventServiceUpStart, EventServiceUp, func(service Service) error { - return service.Up() + return p.forEach([]string{}, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(wrappers, events.NoEvent, events.NoEvent, func(service Service) error { + return service.RemoveImage(ctx, opts.RemoveImages) }) }), func(service Service) error { - return service.Create() + return service.Create(ctx, options.Create{}) }) } -// Log aggregate and prints out the logs for the specified services. -func (p *Project) Log(services ...string) error { - return p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Do(nil, NoEvent, NoEvent, func(service Service) error { - return service.Log() +func (p *Project) removeOrphanContainers() error { + client := p.clientFactory.Create(nil) + filter := filters.NewArgs() + filter.Add("label", labels.PROJECT.EqString(p.Name)) + containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + Filter: filter, + }) + if err != nil { + return err + } + currentServices := map[string]struct{}{} + for _, serviceName := range p.ServiceConfigs.Keys() { + currentServices[serviceName] = struct{}{} + } + for _, container := range containers { + serviceLabel := container.Labels[labels.SERVICE.Str()] + if _, ok := currentServices[serviceLabel]; !ok { + if err := client.ContainerKill(context.Background(), container.ID, "SIGKILL"); err != nil { + return err + } + if err := client.ContainerRemove(context.Background(), types.ContainerRemoveOptions{ + ContainerID: container.ID, + Force: true, + }); err != nil { + return err + } + } + } + return nil +} + +// Restart restarts the specified services (like docker restart). +func (p *Project) Restart(ctx context.Context, timeout int, services ...string) error { + return p.perform(events.ProjectRestartStart, events.ProjectRestartDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(wrappers, events.ServiceRestartStart, events.ServiceRestart, func(service Service) error { + return service.Restart(ctx, timeout) }) }), nil) } +// Port returns the public port for a port binding of the specified service. 
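The index argument validated in the body below is 1-based over the service's containers. A sketch:

// Host address of the first "web" container's published 80/tcp port.
addr, err := p.Port(context.Background(), 1, "tcp", "web", "80")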
+func (p *Project) Port(ctx context.Context, index int, protocol, serviceName, privatePort string) (string, error) {
+	service, err := p.CreateService(serviceName)
+	if err != nil {
+		return "", err
+	}
+
+	containers, err := service.Containers(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	if index < 1 || index > len(containers) {
+		return "", fmt.Errorf("Invalid index %d", index)
+	}
+
+	return containers[index-1].Port(ctx, fmt.Sprintf("%s/%s", privatePort, protocol))
+}
+
+// Ps lists containers for the specified services.
+func (p *Project) Ps(ctx context.Context, onlyID bool, services ...string) (InfoSet, error) {
+	allInfo := InfoSet{}
+	for _, name := range p.ServiceConfigs.Keys() {
+		service, err := p.CreateService(name)
+		if err != nil {
+			return nil, err
+		}
+
+		info, err := service.Info(ctx, onlyID)
+		if err != nil {
+			return nil, err
+		}
+
+		allInfo = append(allInfo, info...)
+	}
+	return allInfo, nil
+}
+
+// Start starts the specified services (like docker start).
+func (p *Project) Start(ctx context.Context, services ...string) error {
+	return p.perform(events.ProjectStartStart, events.ProjectStartDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(wrappers, events.ServiceStartStart, events.ServiceStart, func(service Service) error {
+			return service.Start(ctx)
+		})
+	}), nil)
+}
+
+// Run executes a one-off command (like `docker run image command`).
+func (p *Project) Run(ctx context.Context, serviceName string, commandParts []string) (int, error) {
+	if !p.ServiceConfigs.Has(serviceName) {
+		return 1, fmt.Errorf("%s is not defined in the template", serviceName)
+	}
+
+	var exitCode int
+	err := p.forEach([]string{}, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(wrappers, events.ServiceRunStart, events.ServiceRun, func(service Service) error {
+			if service.Name() == serviceName {
+				code, err := service.Run(ctx, commandParts)
+				exitCode = code
+				return err
+			}
+			return nil
+		})
+	}), func(service Service) error {
+		return service.Create(ctx, options.Create{})
+	})
+	return exitCode, err
+}
+
+// Up creates and starts the specified services (kinda like docker run).
+func (p *Project) Up(ctx context.Context, options options.Up, services ...string) error {
+	return p.perform(events.ProjectUpStart, events.ProjectUpDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(wrappers, events.ServiceUpStart, events.ServiceUp, func(service Service) error {
+			return service.Up(ctx, options)
+		})
+	}), func(service Service) error {
+		return service.Create(ctx, options.Create)
+	})
+}
+
+// Log aggregates and prints out the logs for the specified services.
+func (p *Project) Log(ctx context.Context, follow bool, services ...string) error {
+	return p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(nil, events.NoEvent, events.NoEvent, func(service Service) error {
+			return service.Log(ctx, follow)
+		})
+	}), nil)
+}
+
+// Scale scales the specified services.
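As the body below shows, Scale validates every requested service before changing any of them, then applies the counts one service at a time, forwarding the timeout to each service's Scale. A sketch:

err := p.Scale(context.Background(), 10, map[string]int{
	"web":    3,
	"worker": 1,
})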
+func (p *Project) Scale(ctx context.Context, timeout int, servicesScale map[string]int) error { + // This code is a bit verbose but I wanted to parse everything up front + order := make([]string, 0, 0) + services := make(map[string]Service) + + for name := range servicesScale { + if !p.ServiceConfigs.Has(name) { + return fmt.Errorf("%s is not defined in the template", name) + } + + service, err := p.CreateService(name) + if err != nil { + return fmt.Errorf("Failed to lookup service: %s: %v", service, err) + } + + order = append(order, name) + services[name] = service + } + + for _, name := range order { + scale := servicesScale[name] + log.Infof("Setting scale %s=%d...", name, scale) + err := services[name].Scale(ctx, scale, timeout) + if err != nil { + return fmt.Errorf("Failed to set the scale %s=%d: %v", name, scale, err) + } + } + return nil +} + // Pull pulls the specified services (like docker pull). -func (p *Project) Pull(services ...string) error { +func (p *Project) Pull(ctx context.Context, services ...string) error { return p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Do(nil, EventServicePullStart, EventServicePull, func(service Service) error { - return service.Pull() + wrapper.Do(nil, events.ServicePullStart, events.ServicePull, func(service Service) error { + return service.Pull(ctx) }) }), nil) } +// listStoppedContainers lists the stopped containers for the specified services. +func (p *Project) listStoppedContainers(ctx context.Context, services ...string) ([]string, error) { + stoppedContainers := []string{} + err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(nil, events.NoEvent, events.NoEvent, func(service Service) error { + containers, innerErr := service.Containers(ctx) + if innerErr != nil { + return innerErr + } + + for _, container := range containers { + running, innerErr := container.IsRunning(ctx) + if innerErr != nil { + log.Error(innerErr) + } + if !running { + containerID, innerErr := container.ID() + if innerErr != nil { + log.Error(innerErr) + } + stoppedContainers = append(stoppedContainers, containerID) + } + } + + return nil + }) + }), nil) + if err != nil { + return nil, err + } + return stoppedContainers, nil +} + // Delete removes the specified services (like docker rm). -func (p *Project) Delete(services ...string) error { - return p.perform(EventProjectDeleteStart, EventProjectDeleteDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Do(nil, EventServiceDeleteStart, EventServiceDelete, func(service Service) error { - return service.Delete() +func (p *Project) Delete(ctx context.Context, options options.Delete, services ...string) error { + stoppedContainers, err := p.listStoppedContainers(ctx, services...) + if err != nil { + return err + } + if len(stoppedContainers) == 0 { + p.Notify(events.ProjectDeleteDone, "", nil) + fmt.Println("No stopped containers") + return nil + } + if options.BeforeDeleteCallback != nil && !options.BeforeDeleteCallback(stoppedContainers) { + return nil + } + return p.perform(events.ProjectDeleteStart, events.ProjectDeleteDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(nil, events.ServiceDeleteStart, events.ServiceDelete, func(service Service) error { + return service.Delete(ctx, options) }) }), nil) } // Kill kills the specified services (like docker kill). 
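Kill now threads a signal name through to each service, where the old signature (removed below) took none. A sketch; the signal choice is illustrative:

err := p.Kill(context.Background(), "SIGKILL", "web")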
-func (p *Project) Kill(services ...string) error {
-	return p.perform(EventProjectKillStart, EventProjectKillDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
-		wrapper.Do(nil, EventServiceKillStart, EventServiceKill, func(service Service) error {
-			return service.Kill()
+func (p *Project) Kill(ctx context.Context, signal string, services ...string) error {
+	return p.perform(events.ProjectKillStart, events.ProjectKillDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(nil, events.ServiceKillStart, events.ServiceKill, func(service Service) error {
+			return service.Kill(ctx, signal)
 		})
 	}), nil)
 }
 
-func (p *Project) perform(start, done EventType, services []string, action wrapperAction, cycleAction serviceAction) error {
+// Pause pauses the specified services' containers (like docker pause).
+func (p *Project) Pause(ctx context.Context, services ...string) error {
+	return p.perform(events.ProjectPauseStart, events.ProjectPauseDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(nil, events.ServicePauseStart, events.ServicePause, func(service Service) error {
+			return service.Pause(ctx)
+		})
+	}), nil)
+}
+
+// Unpause unpauses the specified services' containers (like docker unpause).
+func (p *Project) Unpause(ctx context.Context, services ...string) error {
+	return p.perform(events.ProjectUnpauseStart, events.ProjectUnpauseDone, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
+		wrapper.Do(nil, events.ServiceUnpauseStart, events.ServiceUnpause, func(service Service) error {
+			return service.Unpause(ctx)
+		})
+	}), nil)
+}
+
+func (p *Project) perform(start, done events.EventType, services []string, action wrapperAction, cycleAction serviceAction) error {
 	p.Notify(start, "", nil)
 
 	err := p.forEach(services, action, cycleAction)
@@ -324,7 +588,7 @@ func (p *Project) traverse(start bool, selected map[string]bool, wrappers map[st
 	wrapperList := []string{}
 
 	if start {
-		for name := range p.Configs {
+		for _, name := range p.ServiceConfigs.Keys() {
 			wrapperList = append(wrapperList, name)
 		}
 	} else {
@@ -349,7 +613,9 @@ func (p *Project) traverse(start bool, selected map[string]bool, wrappers map[st
 	launched := map[string]bool{}
 
 	for _, wrapper := range wrappers {
-		p.startService(wrappers, []string{}, selected, launched, wrapper, action, cycleAction)
+		if err := p.startService(wrappers, []string{}, selected, launched, wrapper, action, cycleAction); err != nil {
+			return err
+		}
 	}
 
 	var firstError error
@@ -380,25 +646,27 @@ func (p *Project) traverse(start bool, selected map[string]bool, wrappers map[st
 }
 
 // AddListener adds the specified listener to the project.
-func (p *Project) AddListener(c chan<- Event) {
+// This implicitly implements events.Emitter.
+func (p *Project) AddListener(c chan<- events.Event) {
 	if !p.hasListeners {
 		for _, l := range p.listeners {
 			close(l)
 		}
 		p.hasListeners = true
-		p.listeners = []chan<- Event{c}
+		p.listeners = []chan<- events.Event{c}
 	} else {
 		p.listeners = append(p.listeners, c)
 	}
 }
 
 // Notify notifies all project listeners with the specified eventType, service name and data.
-func (p *Project) Notify(eventType EventType, serviceName string, data map[string]string) {
-	if eventType == NoEvent {
+// This implicitly implements the events.Notifier interface.
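Together, AddListener and Notify make the project an event bus for external consumers; note above that the first external AddListener closes and replaces the default listener. A sketch of a custom listener (the goroutine and the printed message are illustrative):

evts := make(chan events.Event)
p.AddListener(evts)
go func() {
	for e := range evts {
		if e.EventType == events.ServiceUp {
			fmt.Printf("%s is up\n", e.ServiceName)
		}
	}
}()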
+func (p *Project) Notify(eventType events.EventType, serviceName string, data map[string]string) { + if eventType == events.NoEvent { return } - event := Event{ + event := events.Event{ EventType: eventType, ServiceName: serviceName, Data: data, diff --git a/vendor/github.com/docker/libcompose/project/project_test.go b/vendor/github.com/docker/libcompose/project/project_test.go deleted file mode 100644 index deed9f69..00000000 --- a/vendor/github.com/docker/libcompose/project/project_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package project - -import ( - "fmt" - "reflect" - "strings" - "testing" -) - -type TestServiceFactory struct { - Counts map[string]int -} - -type TestService struct { - factory *TestServiceFactory - name string - config *ServiceConfig - EmptyService - Count int -} - -func (t *TestService) Config() *ServiceConfig { - return t.config -} - -func (t *TestService) Name() string { - return t.name -} - -func (t *TestService) Create() error { - key := t.name + ".create" - t.factory.Counts[key] = t.factory.Counts[key] + 1 - return nil -} - -func (t *TestService) DependentServices() []ServiceRelationship { - return nil -} - -func (t *TestServiceFactory) Create(project *Project, name string, serviceConfig *ServiceConfig) (Service, error) { - return &TestService{ - factory: t, - config: serviceConfig, - name: name, - }, nil -} - -func TestTwoCall(t *testing.T) { - factory := &TestServiceFactory{ - Counts: map[string]int{}, - } - - p := NewProject(&Context{ - ServiceFactory: factory, - }) - p.Configs = map[string]*ServiceConfig{ - "foo": {}, - } - - if err := p.Create("foo"); err != nil { - t.Fatal(err) - } - - if err := p.Create("foo"); err != nil { - t.Fatal(err) - } - - if factory.Counts["foo.create"] != 2 { - t.Fatal("Failed to create twice") - } -} - -func TestEventEquality(t *testing.T) { - if fmt.Sprintf("%s", EventServiceStart) != "Started" || - fmt.Sprintf("%v", EventServiceStart) != "Started" { - t.Fatalf("EventServiceStart String() doesn't work: %s %v", EventServiceStart, EventServiceStart) - } - - if fmt.Sprintf("%s", EventServiceStart) != fmt.Sprintf("%s", EventServiceUp) { - t.Fatal("Event messages do not match") - } - - if EventServiceStart == EventServiceUp { - t.Fatal("Events match") - } -} - -func TestParseWithBadContent(t *testing.T) { - p := NewProject(&Context{ - ComposeBytes: []byte("garbage"), - }) - - err := p.Parse() - if err == nil { - t.Fatal("Should have failed parse") - } - - if !strings.HasPrefix(err.Error(), "Unknown resolution for 'garbage'") { - t.Fatalf("Should have failed parse: %#v", err) - } -} - -func TestParseWithGoodContent(t *testing.T) { - p := NewProject(&Context{ - ComposeBytes: []byte("not-garbage:\n image: foo"), - }) - - err := p.Parse() - if err != nil { - t.Fatal(err) - } -} - -type TestEnvironmentLookup struct { -} - -func (t *TestEnvironmentLookup) Lookup(key, serviceName string, config *ServiceConfig) []string { - return []string{fmt.Sprintf("%s=X", key)} -} - -func TestEnvironmentResolve(t *testing.T) { - factory := &TestServiceFactory{ - Counts: map[string]int{}, - } - - p := NewProject(&Context{ - ServiceFactory: factory, - EnvironmentLookup: &TestEnvironmentLookup{}, - }) - p.Configs = map[string]*ServiceConfig{ - "foo": { - Environment: NewMaporEqualSlice([]string{ - "A", - "A=", - "A=B", - }), - }, - } - - service, err := p.CreateService("foo") - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(service.Config().Environment.Slice(), []string{"A=X", "A=X", "A=B"}) { - t.Fatal("Invalid environment", 
service.Config().Environment.Slice()) - } -} diff --git a/vendor/github.com/docker/libcompose/project/service-wrapper.go b/vendor/github.com/docker/libcompose/project/service-wrapper.go index 679b36d3..d0654f9f 100644 --- a/vendor/github.com/docker/libcompose/project/service-wrapper.go +++ b/vendor/github.com/docker/libcompose/project/service-wrapper.go @@ -4,6 +4,7 @@ import ( "sync" log "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/project/events" ) type serviceWrapper struct { @@ -55,7 +56,7 @@ func (s *serviceWrapper) Ignore() { defer s.done.Done() s.state = StateExecuted - s.project.Notify(EventServiceUpIgnored, s.service.Name(), nil) + s.project.Notify(events.ServiceUpIgnored, s.service.Name(), nil) } func (s *serviceWrapper) waitForDeps(wrappers map[string]*serviceWrapper) bool { @@ -70,7 +71,7 @@ func (s *serviceWrapper) waitForDeps(wrappers map[string]*serviceWrapper) bool { if wrapper, ok := wrappers[dep.Target]; ok { if wrapper.Wait() == ErrRestart { - s.project.Notify(EventProjectReload, wrapper.service.Name(), nil) + s.project.Notify(events.ProjectReload, wrapper.service.Name(), nil) s.err = ErrRestart return false } @@ -82,7 +83,7 @@ func (s *serviceWrapper) waitForDeps(wrappers map[string]*serviceWrapper) bool { return true } -func (s *serviceWrapper) Do(wrappers map[string]*serviceWrapper, start, done EventType, action func(service Service) error) { +func (s *serviceWrapper) Do(wrappers map[string]*serviceWrapper, start, done events.EventType, action func(service Service) error) { defer s.done.Done() if s.state == StateExecuted { @@ -100,7 +101,7 @@ func (s *serviceWrapper) Do(wrappers map[string]*serviceWrapper, start, done Eve s.err = action(s.service) if s.err == ErrRestart { s.project.Notify(done, s.service.Name(), nil) - s.project.Notify(EventProjectReloadTrigger, s.service.Name(), nil) + s.project.Notify(events.ProjectReloadTrigger, s.service.Name(), nil) } else if s.err != nil { log.Errorf("Failed %s %s : %v", start, s.name, s.err) } else { diff --git a/vendor/github.com/docker/libcompose/project/service.go b/vendor/github.com/docker/libcompose/project/service.go new file mode 100644 index 00000000..f40478a1 --- /dev/null +++ b/vendor/github.com/docker/libcompose/project/service.go @@ -0,0 +1,92 @@ +package project + +import ( + "errors" + + "golang.org/x/net/context" + + "github.com/docker/libcompose/config" + "github.com/docker/libcompose/project/options" +) + +// Service defines what a libcompose service provides. +type Service interface { + Build(ctx context.Context, buildOptions options.Build) error + Create(ctx context.Context, options options.Create) error + Delete(ctx context.Context, options options.Delete) error + Info(ctx context.Context, qFlag bool) (InfoSet, error) + Log(ctx context.Context, follow bool) error + Kill(ctx context.Context, signal string) error + Pause(ctx context.Context) error + Pull(ctx context.Context) error + Restart(ctx context.Context, timeout int) error + Run(ctx context.Context, commandParts []string) (int, error) + Scale(ctx context.Context, count int, timeout int) error + Start(ctx context.Context) error + Stop(ctx context.Context, timeout int) error + Unpause(ctx context.Context) error + Up(ctx context.Context, options options.Up) error + + RemoveImage(ctx context.Context, imageType options.ImageType) error + Containers(ctx context.Context) ([]Container, error) + DependentServices() []ServiceRelationship + Config() *config.ServiceConfig + Name() string +} + +// ServiceState holds the state of a service. 
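The state and sentinel values declared next are what serviceWrapper.Do above keys on: an action returning ErrRestart still emits its done event and then fires events.ProjectReloadTrigger instead of being logged as a failure. A hypothetical external implementation leaning on that contract (mySvc and configChanged are invented for illustration):

func (s *mySvc) Up(ctx context.Context, _ options.Up) error {
	if s.configChanged {
		return project.ErrRestart // request a project reload rather than failing
	}
	return nil
}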
+type ServiceState string + +// State definitions +var ( + StateExecuted = ServiceState("executed") + StateUnknown = ServiceState("unknown") +) + +// Error definitions +var ( + ErrRestart = errors.New("Restart execution") + ErrUnsupported = errors.New("UnsupportedOperation") +) + +// ServiceFactory is an interface factory to create Service object for the specified +// project, with the specified name and service configuration. +type ServiceFactory interface { + Create(project *Project, name string, serviceConfig *config.ServiceConfig) (Service, error) +} + +// ServiceRelationshipType defines the type of service relationship. +type ServiceRelationshipType string + +// RelTypeLink means the services are linked (docker links). +const RelTypeLink = ServiceRelationshipType("") + +// RelTypeNetNamespace means the services share the same network namespace. +const RelTypeNetNamespace = ServiceRelationshipType("netns") + +// RelTypeIpcNamespace means the service share the same ipc namespace. +const RelTypeIpcNamespace = ServiceRelationshipType("ipc") + +// RelTypeVolumesFrom means the services share some volumes. +const RelTypeVolumesFrom = ServiceRelationshipType("volumesFrom") + +// RelTypeDependsOn means the dependency was explicitly set using 'depends_on'. +const RelTypeDependsOn = ServiceRelationshipType("dependsOn") + +// ServiceRelationship holds the relationship information between two services. +type ServiceRelationship struct { + Target, Alias string + Type ServiceRelationshipType + Optional bool +} + +// NewServiceRelationship creates a new Relationship based on the specified alias +// and relationship type. +func NewServiceRelationship(nameAlias string, relType ServiceRelationshipType) ServiceRelationship { + name, alias := NameAlias(nameAlias) + return ServiceRelationship{ + Target: name, + Alias: alias, + Type: relType, + } +} diff --git a/vendor/github.com/docker/libcompose/project/types.go b/vendor/github.com/docker/libcompose/project/types.go deleted file mode 100644 index c0b12915..00000000 --- a/vendor/github.com/docker/libcompose/project/types.go +++ /dev/null @@ -1,295 +0,0 @@ -package project - -import "fmt" - -// EventType defines a type of libcompose event. 
-type EventType int - -// Definitions of libcompose events -const ( - NoEvent = EventType(iota) - - EventContainerCreated = EventType(iota) - EventContainerStarted = EventType(iota) - - EventServiceAdd = EventType(iota) - EventServiceUpStart = EventType(iota) - EventServiceUpIgnored = EventType(iota) - EventServiceUp = EventType(iota) - EventServiceCreateStart = EventType(iota) - EventServiceCreate = EventType(iota) - EventServiceDeleteStart = EventType(iota) - EventServiceDelete = EventType(iota) - EventServiceDownStart = EventType(iota) - EventServiceDown = EventType(iota) - EventServiceRestartStart = EventType(iota) - EventServiceRestart = EventType(iota) - EventServicePullStart = EventType(iota) - EventServicePull = EventType(iota) - EventServiceKillStart = EventType(iota) - EventServiceKill = EventType(iota) - EventServiceStartStart = EventType(iota) - EventServiceStart = EventType(iota) - EventServiceBuildStart = EventType(iota) - EventServiceBuild = EventType(iota) - - EventProjectDownStart = EventType(iota) - EventProjectDownDone = EventType(iota) - EventProjectCreateStart = EventType(iota) - EventProjectCreateDone = EventType(iota) - EventProjectUpStart = EventType(iota) - EventProjectUpDone = EventType(iota) - EventProjectDeleteStart = EventType(iota) - EventProjectDeleteDone = EventType(iota) - EventProjectRestartStart = EventType(iota) - EventProjectRestartDone = EventType(iota) - EventProjectReload = EventType(iota) - EventProjectReloadTrigger = EventType(iota) - EventProjectKillStart = EventType(iota) - EventProjectKillDone = EventType(iota) - EventProjectStartStart = EventType(iota) - EventProjectStartDone = EventType(iota) - EventProjectBuildStart = EventType(iota) - EventProjectBuildDone = EventType(iota) -) - -func (e EventType) String() string { - var m string - switch e { - case EventContainerCreated: - m = "Created container" - case EventContainerStarted: - m = "Started container" - - case EventServiceAdd: - m = "Adding" - case EventServiceUpStart: - m = "Starting" - case EventServiceUpIgnored: - m = "Ignoring" - case EventServiceUp: - m = "Started" - case EventServiceCreateStart: - m = "Creating" - case EventServiceCreate: - m = "Created" - case EventServiceDeleteStart: - m = "Deleting" - case EventServiceDelete: - m = "Deleted" - case EventServiceDownStart: - m = "Stopping" - case EventServiceDown: - m = "Stopped" - case EventServiceRestartStart: - m = "Restarting" - case EventServiceRestart: - m = "Restarted" - case EventServicePullStart: - m = "Pulling" - case EventServicePull: - m = "Pulled" - case EventServiceKillStart: - m = "Killing" - case EventServiceKill: - m = "Killed" - case EventServiceStartStart: - m = "Starting" - case EventServiceStart: - m = "Started" - case EventServiceBuildStart: - m = "Building" - case EventServiceBuild: - m = "Built" - - case EventProjectDownStart: - m = "Stopping project" - case EventProjectDownDone: - m = "Project stopped" - case EventProjectCreateStart: - m = "Creating project" - case EventProjectCreateDone: - m = "Project created" - case EventProjectUpStart: - m = "Starting project" - case EventProjectUpDone: - m = "Project started" - case EventProjectDeleteStart: - m = "Deleting project" - case EventProjectDeleteDone: - m = "Project deleted" - case EventProjectRestartStart: - m = "Restarting project" - case EventProjectRestartDone: - m = "Project restarted" - case EventProjectReload: - m = "Reloading project" - case EventProjectReloadTrigger: - m = "Triggering project reload" - case EventProjectKillStart: - m = "Killing 
project" - case EventProjectKillDone: - m = "Project killed" - case EventProjectStartStart: - m = "Starting project" - case EventProjectStartDone: - m = "Project started" - case EventProjectBuildStart: - m = "Building project" - case EventProjectBuildDone: - m = "Project built" - } - - if m == "" { - m = fmt.Sprintf("EventType: %d", int(e)) - } - - return m -} - -// InfoPart holds key/value strings. -type InfoPart struct { - Key, Value string -} - -// InfoSet holds a list of Info. -type InfoSet []Info - -// Info holds a list of InfoPart. -type Info []InfoPart - -// ServiceConfig holds libcompose service configuration -type ServiceConfig struct { - Build string `yaml:"build,omitempty"` - CapAdd []string `yaml:"cap_add,omitempty"` - CapDrop []string `yaml:"cap_drop,omitempty"` - CPUSet string `yaml:"cpuset,omitempty"` - CPUShares int64 `yaml:"cpu_shares,omitempty"` - Command Command `yaml:"command,flow,omitempty"` - ContainerName string `yaml:"container_name,omitempty"` - Devices []string `yaml:"devices,omitempty"` - DNS Stringorslice `yaml:"dns,omitempty"` - DNSSearch Stringorslice `yaml:"dns_search,omitempty"` - Dockerfile string `yaml:"dockerfile,omitempty"` - DomainName string `yaml:"domainname,omitempty"` - Entrypoint Command `yaml:"entrypoint,flow,omitempty"` - EnvFile Stringorslice `yaml:"env_file,omitempty"` - Environment MaporEqualSlice `yaml:"environment,omitempty"` - Hostname string `yaml:"hostname,omitempty"` - Image string `yaml:"image,omitempty"` - Labels SliceorMap `yaml:"labels,omitempty"` - Links MaporColonSlice `yaml:"links,omitempty"` - LogDriver string `yaml:"log_driver,omitempty"` - MemLimit int64 `yaml:"mem_limit,omitempty"` - MemSwapLimit int64 `yaml:"memswap_limit,omitempty"` - Name string `yaml:"name,omitempty"` - Net string `yaml:"net,omitempty"` - Pid string `yaml:"pid,omitempty"` - Uts string `yaml:"uts,omitempty"` - Ipc string `yaml:"ipc,omitempty"` - Ports []string `yaml:"ports,omitempty"` - Privileged bool `yaml:"privileged,omitempty"` - Restart string `yaml:"restart,omitempty"` - ReadOnly bool `yaml:"read_only,omitempty"` - StdinOpen bool `yaml:"stdin_open,omitempty"` - SecurityOpt []string `yaml:"security_opt,omitempty"` - Tty bool `yaml:"tty,omitempty"` - User string `yaml:"user,omitempty"` - VolumeDriver string `yaml:"volume_driver,omitempty"` - Volumes []string `yaml:"volumes,omitempty"` - VolumesFrom []string `yaml:"volumes_from,omitempty"` - WorkingDir string `yaml:"working_dir,omitempty"` - Expose []string `yaml:"expose,omitempty"` - ExternalLinks []string `yaml:"external_links,omitempty"` - LogOpt map[string]string `yaml:"log_opt,omitempty"` - ExtraHosts []string `yaml:"extra_hosts,omitempty"` -} - -// EnvironmentLookup defines methods to provides environment variable loading. -type EnvironmentLookup interface { - Lookup(key, serviceName string, config *ServiceConfig) []string -} - -// ConfigLookup defines methods to provides file loading. -type ConfigLookup interface { - Lookup(file, relativeTo string) ([]byte, string, error) -} - -// Project holds libcompose project information. -type Project struct { - Name string - Configs map[string]*ServiceConfig - File string - ReloadCallback func() error - context *Context - reload []string - upCount int - listeners []chan<- Event - hasListeners bool -} - -// Service defines what a libcompose service provides. 
-type Service interface { - Info(qFlag bool) (InfoSet, error) - Name() string - Build() error - Create() error - Up() error - Start() error - Down() error - Delete() error - Restart() error - Log() error - Pull() error - Kill() error - Config() *ServiceConfig - DependentServices() []ServiceRelationship - Containers() ([]Container, error) - Scale(count int) error -} - -// Container defines what a libcompose container provides. -type Container interface { - ID() (string, error) - Name() string - Port(port string) (string, error) -} - -// ServiceFactory is an interface factory to create Service object for the specified -// project, with the specified name and service configuration. -type ServiceFactory interface { - Create(project *Project, name string, serviceConfig *ServiceConfig) (Service, error) -} - -// ServiceRelationshipType defines the type of service relationship. -type ServiceRelationshipType string - -// RelTypeLink means the services are linked (docker links). -const RelTypeLink = ServiceRelationshipType("") - -// RelTypeNetNamespace means the services share the same network namespace. -const RelTypeNetNamespace = ServiceRelationshipType("netns") - -// RelTypeIpcNamespace means the service share the same ipc namespace. -const RelTypeIpcNamespace = ServiceRelationshipType("ipc") - -// RelTypeVolumesFrom means the services share some volumes. -const RelTypeVolumesFrom = ServiceRelationshipType("volumesFrom") - -// ServiceRelationship holds the relationship information between two services. -type ServiceRelationship struct { - Target, Alias string - Type ServiceRelationshipType - Optional bool -} - -// NewServiceRelationship creates a new Relationship based on the specified alias -// and relationship type. -func NewServiceRelationship(nameAlias string, relType ServiceRelationshipType) ServiceRelationship { - name, alias := NameAlias(nameAlias) - return ServiceRelationship{ - Target: name, - Alias: alias, - Type: relType, - } -} diff --git a/vendor/github.com/docker/libcompose/project/types_yaml.go b/vendor/github.com/docker/libcompose/project/types_yaml.go deleted file mode 100644 index fa3051df..00000000 --- a/vendor/github.com/docker/libcompose/project/types_yaml.go +++ /dev/null @@ -1,328 +0,0 @@ -package project - -import ( - "fmt" - "strings" - - "github.com/flynn/go-shlex" -) - -// Stringorslice represents a string or an array of strings. -// TODO use docker/docker/pkg/stringutils.StrSlice once 1.9.x is released. -type Stringorslice struct { - parts []string -} - -// MarshalYAML implements the Marshaller interface. -func (s Stringorslice) MarshalYAML() (tag string, value interface{}, err error) { - return "", s.parts, nil -} - -func toStrings(s []interface{}) ([]string, error) { - if len(s) == 0 { - return nil, nil - } - r := make([]string, len(s)) - for k, v := range s { - if sv, ok := v.(string); ok { - r[k] = sv - } else { - return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) - } - } - return r, nil -} - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *Stringorslice) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case []interface{}: - parts, err := toStrings(value) - if err != nil { - return err - } - s.parts = parts - case string: - s.parts = []string{value} - default: - return fmt.Errorf("Failed to unmarshal Stringorslice: %#v", value) - } - return nil -} - -// Len returns the number of parts of the Stringorslice. 
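These deleted union types (whose role moves toward the config package in this change) existed so that either YAML shape decodes to one Go value. A sketch against the old API, using the candiedyaml import seen in the tests below; the holder struct is invented:

type holder struct {
	DNS Stringorslice `yaml:"dns,omitempty"`
}

var one, many holder
yaml.Unmarshal([]byte(`dns: 8.8.8.8`), &one)                 // one.DNS.Slice() == []string{"8.8.8.8"}
yaml.Unmarshal([]byte(`dns: ["8.8.8.8", "8.8.4.4"]`), &many) // list form fills the same slice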
-func (s *Stringorslice) Len() int { - if s == nil { - return 0 - } - return len(s.parts) -} - -// Slice gets the parts of the StrSlice as a Slice of string. -func (s *Stringorslice) Slice() []string { - if s == nil { - return nil - } - return s.parts -} - -// NewStringorslice creates an Stringorslice based on the specified parts (as strings). -func NewStringorslice(parts ...string) Stringorslice { - return Stringorslice{parts} -} - -// Command represents a docker command, can be a string or an array of strings. -// FIXME why not use Stringorslice (type Command struct { Stringorslice } -type Command struct { - parts []string -} - -// MarshalYAML implements the Marshaller interface. -func (s Command) MarshalYAML() (tag string, value interface{}, err error) { - return "", s.parts, nil -} - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *Command) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case []interface{}: - parts, err := toStrings(value) - if err != nil { - return err - } - s.parts = parts - case string: - parts, err := shlex.Split(value) - if err != nil { - return err - } - s.parts = parts - default: - return fmt.Errorf("Failed to unmarshal Command: %#v", value) - } - return nil -} - -// ToString returns the parts of the command as a string (joined by spaces). -func (s *Command) ToString() string { - return strings.Join(s.parts, " ") -} - -// Slice gets the parts of the Command as a Slice of string. -func (s *Command) Slice() []string { - return s.parts -} - -// NewCommand create a Command based on the specified parts (as strings). -func NewCommand(parts ...string) Command { - return Command{parts} -} - -// SliceorMap represents a slice or a map of strings. -type SliceorMap struct { - parts map[string]string -} - -// MarshalYAML implements the Marshaller interface. -func (s SliceorMap) MarshalYAML() (tag string, value interface{}, err error) { - return "", s.parts, nil -} - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *SliceorMap) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case map[interface{}]interface{}: - parts := map[string]string{} - for k, v := range value { - if sk, ok := k.(string); ok { - if sv, ok := v.(string); ok { - parts[sk] = sv - } else { - return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) - } - } else { - return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k) - } - } - s.parts = parts - case []interface{}: - parts := map[string]string{} - for _, s := range value { - if str, ok := s.(string); ok { - str := strings.TrimSpace(str) - keyValueSlice := strings.SplitN(str, "=", 2) - - key := keyValueSlice[0] - val := "" - if len(keyValueSlice) == 2 { - val = keyValueSlice[1] - } - parts[key] = val - } else { - return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", s, s) - } - } - s.parts = parts - default: - return fmt.Errorf("Failed to unmarshal SliceorMap: %#v", value) - } - return nil -} - -// MapParts get the parts of the SliceorMap as a Map of string. -func (s *SliceorMap) MapParts() map[string]string { - if s == nil { - return nil - } - return s.parts -} - -// NewSliceorMap creates a new SliceorMap based on the specified parts (as map of string). -func NewSliceorMap(parts map[string]string) SliceorMap { - return SliceorMap{parts} -} - -// MaporEqualSlice represents a slice of strings that gets unmarshal from a -// YAML map into 'key=value' string. 
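Concretely, toSepMapParts below joins map entries with the given separator ("=" for this type), so both YAML forms collapse to the same key=value slice. A sketch (keys invented; ordering of map-derived entries is not guaranteed):

var env MaporEqualSlice
yaml.Unmarshal([]byte("FOO: bar\nBAZ: qux"), &env)   // map form
yaml.Unmarshal([]byte("- FOO=bar\n- BAZ=qux"), &env) // list form; both yield {"FOO=bar", "BAZ=qux"}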
-type MaporEqualSlice struct { - parts []string -} - -// MarshalYAML implements the Marshaller interface. -func (s MaporEqualSlice) MarshalYAML() (tag string, value interface{}, err error) { - return "", s.parts, nil -} - -func toSepMapParts(value map[interface{}]interface{}, sep string) ([]string, error) { - if len(value) == 0 { - return nil, nil - } - parts := make([]string, 0, len(value)) - for k, v := range value { - if sk, ok := k.(string); ok { - if sv, ok := v.(string); ok { - parts = append(parts, sk+sep+sv) - } else { - return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) - } - } else { - return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k) - } - } - return parts, nil -} - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *MaporEqualSlice) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case []interface{}: - parts, err := toStrings(value) - if err != nil { - return err - } - s.parts = parts - case map[interface{}]interface{}: - parts, err := toSepMapParts(value, "=") - if err != nil { - return err - } - s.parts = parts - default: - return fmt.Errorf("Failed to unmarshal MaporEqualSlice: %#v", value) - } - return nil -} - -// Slice gets the parts of the MaporEqualSlice as a Slice of string. -func (s *MaporEqualSlice) Slice() []string { - return s.parts -} - -// NewMaporEqualSlice creates a new MaporEqualSlice based on the specified parts. -func NewMaporEqualSlice(parts []string) MaporEqualSlice { - return MaporEqualSlice{parts} -} - -// MaporColonSlice represents a slice of strings that gets unmarshal from a -// YAML map into 'key:value' string. -type MaporColonSlice struct { - parts []string -} - -// MarshalYAML implements the Marshaller interface. -func (s MaporColonSlice) MarshalYAML() (tag string, value interface{}, err error) { - return "", s.parts, nil -} - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *MaporColonSlice) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case []interface{}: - parts, err := toStrings(value) - if err != nil { - return err - } - s.parts = parts - case map[interface{}]interface{}: - parts, err := toSepMapParts(value, ":") - if err != nil { - return err - } - s.parts = parts - default: - return fmt.Errorf("Failed to unmarshal MaporColonSlice: %#v", value) - } - return nil -} - -// Slice gets the parts of the MaporColonSlice as a Slice of string. -func (s *MaporColonSlice) Slice() []string { - return s.parts -} - -// NewMaporColonSlice creates a new MaporColonSlice based on the specified parts. -func NewMaporColonSlice(parts []string) MaporColonSlice { - return MaporColonSlice{parts} -} - -// MaporSpaceSlice represents a slice of strings that gets unmarshal from a -// YAML map into 'key value' string. -type MaporSpaceSlice struct { - parts []string -} - -// MarshalYAML implements the Marshaller interface. -func (s MaporSpaceSlice) MarshalYAML() (tag string, value interface{}, err error) { - return "", s.parts, nil -} - -// UnmarshalYAML implements the Unmarshaller interface. 
-func (s *MaporSpaceSlice) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case []interface{}: - parts, err := toStrings(value) - if err != nil { - return err - } - s.parts = parts - case map[interface{}]interface{}: - parts, err := toSepMapParts(value, " ") - if err != nil { - return err - } - s.parts = parts - default: - return fmt.Errorf("Failed to unmarshal MaporSpaceSlice: %#v", value) - } - return nil -} - -// Slice gets the parts of the MaporSpaceSlice as a Slice of string. -func (s *MaporSpaceSlice) Slice() []string { - return s.parts -} - -// NewMaporSpaceSlice creates a new MaporSpaceSlice based on the specified parts. -func NewMaporSpaceSlice(parts []string) MaporSpaceSlice { - return MaporSpaceSlice{parts} -} diff --git a/vendor/github.com/docker/libcompose/project/types_yaml_test.go b/vendor/github.com/docker/libcompose/project/types_yaml_test.go deleted file mode 100644 index f57b07e0..00000000 --- a/vendor/github.com/docker/libcompose/project/types_yaml_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package project - -import ( - "fmt" - "strings" - "testing" - - yaml "github.com/cloudfoundry-incubator/candiedyaml" - - "github.com/stretchr/testify/assert" -) - -type StructStringorslice struct { - Foo Stringorslice -} - -type TestConfig struct { - SystemContainers map[string]*ServiceConfig -} - -func newTestConfig() TestConfig { - return TestConfig{ - SystemContainers: map[string]*ServiceConfig{ - "udev": { - Image: "udev", - Restart: "always", - Net: "host", - Privileged: true, - DNS: Stringorslice{[]string{"8.8.8.8", "8.8.4.4"}}, - Environment: MaporEqualSlice{[]string{ - "DAEMON=true", - }}, - Labels: SliceorMap{map[string]string{ - "io.rancher.os.detach": "true", - "io.rancher.os.scope": "system", - }}, - VolumesFrom: []string{ - "system-volumes", - }, - }, - "system-volumes": { - Image: "state", - Net: "none", - ReadOnly: true, - Privileged: true, - Labels: SliceorMap{map[string]string{ - "io.rancher.os.createonly": "true", - "io.rancher.os.scope": "system", - }}, - Volumes: []string{ - "/dev:/host/dev", - "/var/lib/rancher/conf:/var/lib/rancher/conf", - "/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt.rancher", - "/lib/modules:/lib/modules", - "/lib/firmware:/lib/firmware", - "/var/run:/var/run", - "/var/log:/var/log", - }, - LogDriver: "json-file", - }, - }, - } -} - -func TestMarshalConfig(t *testing.T) { - config := newTestConfig() - bytes, err := yaml.Marshal(config) - assert.Nil(t, err) - - config2 := TestConfig{} - - err = yaml.Unmarshal(bytes, &config2) - assert.Nil(t, err) - - assert.Equal(t, config, config2) -} - -func TestMarshalServiceConfig(t *testing.T) { - configPtr := newTestConfig().SystemContainers["udev"] - bytes, err := yaml.Marshal(configPtr) - assert.Nil(t, err) - - configPtr2 := &ServiceConfig{} - - err = yaml.Unmarshal(bytes, configPtr2) - assert.Nil(t, err) - - assert.Equal(t, configPtr, configPtr2) -} - -func TestStringorsliceYaml(t *testing.T) { - str := `{foo: [bar, baz]}` - - s := StructStringorslice{} - yaml.Unmarshal([]byte(str), &s) - - assert.Equal(t, []string{"bar", "baz"}, s.Foo.parts) - - d, err := yaml.Marshal(&s) - assert.Nil(t, err) - - s2 := StructStringorslice{} - yaml.Unmarshal(d, &s2) - - assert.Equal(t, []string{"bar", "baz"}, s2.Foo.parts) -} - -type StructSliceorMap struct { - Foos SliceorMap `yaml:"foos,omitempty"` - Bars []string `yaml:"bars"` -} - -type StructCommand struct { - Entrypoint Command `yaml:"entrypoint,flow,omitempty"` - Command Command 
`yaml:"command,flow,omitempty"` -} - -func TestSliceOrMapYaml(t *testing.T) { - str := `{foos: [bar=baz, far=faz]}` - - s := StructSliceorMap{} - yaml.Unmarshal([]byte(str), &s) - - assert.Equal(t, map[string]string{"bar": "baz", "far": "faz"}, s.Foos.parts) - - d, err := yaml.Marshal(&s) - assert.Nil(t, err) - - s2 := StructSliceorMap{} - yaml.Unmarshal(d, &s2) - - assert.Equal(t, map[string]string{"bar": "baz", "far": "faz"}, s2.Foos.parts) -} - -var sampleStructSliceorMap = ` -foos: - io.rancher.os.bar: baz - io.rancher.os.far: true -bars: [] -` - -func TestUnmarshalSliceOrMap(t *testing.T) { - s := StructSliceorMap{} - err := yaml.Unmarshal([]byte(sampleStructSliceorMap), &s) - assert.Equal(t, fmt.Errorf("Cannot unmarshal 'true' of type bool into a string value"), err) -} - -func TestStr2SliceOrMapPtrMap(t *testing.T) { - s := map[string]*StructSliceorMap{"udav": { - Foos: SliceorMap{map[string]string{"io.rancher.os.bar": "baz", "io.rancher.os.far": "true"}}, - Bars: []string{}, - }} - d, err := yaml.Marshal(&s) - assert.Nil(t, err) - - s2 := map[string]*StructSliceorMap{} - yaml.Unmarshal(d, &s2) - - assert.Equal(t, s, s2) -} - -type StructMaporslice struct { - Foo MaporEqualSlice -} - -func contains(list []string, item string) bool { - for _, test := range list { - if test == item { - return true - } - } - return false -} - -func TestMaporsliceYaml(t *testing.T) { - str := `{foo: {bar: baz, far: faz}}` - - s := StructMaporslice{} - yaml.Unmarshal([]byte(str), &s) - - assert.Equal(t, 2, len(s.Foo.parts)) - assert.True(t, contains(s.Foo.parts, "bar=baz")) - assert.True(t, contains(s.Foo.parts, "far=faz")) - - d, err := yaml.Marshal(&s) - assert.Nil(t, err) - - s2 := StructMaporslice{} - yaml.Unmarshal(d, &s2) - - assert.Equal(t, 2, len(s2.Foo.parts)) - assert.True(t, contains(s2.Foo.parts, "bar=baz")) - assert.True(t, contains(s2.Foo.parts, "far=faz")) -} - -var sampleStructCommand = `command: bash` - -func TestUnmarshalCommand(t *testing.T) { - s := &StructCommand{} - err := yaml.Unmarshal([]byte(sampleStructCommand), s) - - assert.Nil(t, err) - assert.Equal(t, []string{"bash"}, s.Command.Slice()) - assert.Nil(t, s.Entrypoint.Slice()) - - bytes, err := yaml.Marshal(s) - assert.Nil(t, err) - - s2 := &StructCommand{} - err = yaml.Unmarshal(bytes, s2) - - assert.Nil(t, err) - assert.Equal(t, []string{"bash"}, s2.Command.Slice()) - assert.Nil(t, s2.Entrypoint.Slice()) -} - -var sampleEmptyCommand = `{}` - -func TestUnmarshalEmptyCommand(t *testing.T) { - s := &StructCommand{} - err := yaml.Unmarshal([]byte(sampleEmptyCommand), s) - - assert.Nil(t, err) - assert.Nil(t, s.Command.Slice()) - - bytes, err := yaml.Marshal(s) - assert.Nil(t, err) - assert.Equal(t, "entrypoint: []\ncommand: []", strings.TrimSpace(string(bytes))) - - s2 := &StructCommand{} - err = yaml.Unmarshal(bytes, s2) - - assert.Nil(t, err) - assert.Nil(t, s2.Command.Slice()) -} diff --git a/vendor/github.com/docker/libcompose/project/utils.go b/vendor/github.com/docker/libcompose/project/utils.go index 7b5f6a56..cb7bec1f 100644 --- a/vendor/github.com/docker/libcompose/project/utils.go +++ b/vendor/github.com/docker/libcompose/project/utils.go @@ -3,7 +3,7 @@ package project import ( "strings" - "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types/container" ) // DefaultDependentServices return the dependent services (as an array of ServiceRelationship) @@ -15,7 +15,7 @@ func DefaultDependentServices(p *Project, s Service) []ServiceRelationship { } result := []ServiceRelationship{} - for _, link := 
range config.Links.Slice() { + for _, link := range config.Links { result = append(result, NewServiceRelationship(link, RelTypeLink)) } @@ -23,7 +23,11 @@ func DefaultDependentServices(p *Project, s Service) []ServiceRelationship { result = append(result, NewServiceRelationship(volumesFrom, RelTypeVolumesFrom)) } - result = appendNs(p, result, s.Config().Net, RelTypeNetNamespace) + for _, dependsOn := range config.DependsOn { + result = append(result, NewServiceRelationship(dependsOn, RelTypeDependsOn)) + } + + result = appendNs(p, result, s.Config().NetworkMode, RelTypeNetNamespace) result = appendNs(p, result, s.Config().Ipc, RelTypeIpcNamespace) return result @@ -51,7 +55,7 @@ func NameAlias(name string) (string, string) { // GetContainerFromIpcLikeConfig returns name of the service that shares the IPC // namespace with the specified service. func GetContainerFromIpcLikeConfig(p *Project, conf string) string { - ipc := runconfig.IpcMode(conf) + ipc := container.IpcMode(conf) if !ipc.IsContainer() { return "" } @@ -61,7 +65,7 @@ func GetContainerFromIpcLikeConfig(p *Project, conf string) string { return "" } - if _, ok := p.Configs[name]; ok { + if p.ServiceConfigs.Has(name) { return name } return "" diff --git a/vendor/github.com/docker/libcompose/project/yaml_test.go b/vendor/github.com/docker/libcompose/project/yaml_test.go deleted file mode 100644 index 1368f7e0..00000000 --- a/vendor/github.com/docker/libcompose/project/yaml_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package project - -import ( - "testing" - - yaml "github.com/cloudfoundry-incubator/candiedyaml" - - "github.com/stretchr/testify/assert" -) - -type structStringorslice struct { - Foo Stringorslice `yaml:"foo,flow,omitempty"` -} - -func TestMarshal(t *testing.T) { - s := &structStringorslice{Foo: NewStringorslice("a", "b", "c")} - b, err := yaml.Marshal(s) - assert.Equal(t, "foo: [a, b, c]\n", string(b)) - assert.Nil(t, err) -} - -func TestMarshalEmpty(t *testing.T) { - s := &structStringorslice{} - b, err := yaml.Marshal(s) - assert.Equal(t, "foo: []\n", string(b)) - assert.Nil(t, err) -} - -func TestUnmarshalSlice(t *testing.T) { - expected := &structStringorslice{Foo: NewStringorslice("a", "b", "c")} - b := []byte("foo: [a, b, c]\n") - s := &structStringorslice{} - err := yaml.Unmarshal(b, s) - assert.Equal(t, expected, s) - assert.Nil(t, err) -} - -func TestUnmarshalString(t *testing.T) { - expected := &structStringorslice{Foo: NewStringorslice("abc")} - b := []byte("foo: abc\n") - s := &structStringorslice{} - err := yaml.Unmarshal(b, s) - assert.Equal(t, expected, s) - assert.Nil(t, err) -} - -func TestUnmarshalEmpty(t *testing.T) { - expected := &structStringorslice{Foo: NewStringorslice()} - b := []byte("{}\n") - s := &structStringorslice{} - err := yaml.Unmarshal(b, s) - assert.Equal(t, expected, s) - assert.Nil(t, err) -} diff --git a/vendor/github.com/docker/libcompose/utils/util.go b/vendor/github.com/docker/libcompose/utils/util.go index 36b522f3..ac187636 100644 --- a/vendor/github.com/docker/libcompose/utils/util.go +++ b/vendor/github.com/docker/libcompose/utils/util.go @@ -2,7 +2,6 @@ package utils import ( "encoding/json" - "fmt" "sync" "github.com/Sirupsen/logrus" @@ -17,7 +16,7 @@ type InParallel struct { pool sync.Pool } -// Add adds runs the specified task in parallel and add it to the waitGroup. +// Add runs the specified task in parallel and adds it to the waitGroup. 
func (i *InParallel) Add(task func() error) { i.wg.Add(1) @@ -30,7 +29,7 @@ func (i *InParallel) Add(task func() error) { }() } -// Wait waits for all tasks to complete and returns the latests error encountered if any. +// Wait waits for all tasks to complete and returns the latest error encountered if any. func (i *InParallel) Wait() error { i.wg.Wait() obj := i.pool.Get() @@ -112,22 +111,6 @@ func FilterString(data map[string][]string) string { return string(bytes) } -// LabelFilterString returns a label json string representation of the specifed couple (key,value) -// that is used as filter for docker. -func LabelFilterString(key, value string) string { - return FilterString(map[string][]string{ - "label": {fmt.Sprintf("%s=%s", key, value)}, - }) -} - -// LabelFilter returns a label map representation of the specifed couple (key,value) -// that is used as filter for docker. -func LabelFilter(key, value string) map[string][]string { - return map[string][]string{ - "label": {fmt.Sprintf("%s=%s", key, value)}, - } -} - // Contains checks if the specified string (key) is present in the specified collection. func Contains(collection []string, key string) bool { for _, value := range collection { diff --git a/vendor/github.com/docker/libcompose/utils/util_test.go b/vendor/github.com/docker/libcompose/utils/util_test.go deleted file mode 100644 index 7b379318..00000000 --- a/vendor/github.com/docker/libcompose/utils/util_test.go +++ /dev/null @@ -1,287 +0,0 @@ -package utils - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -type jsonfrom struct { - Element1 string `json:"element2"` - Element2 int `json:"element1"` -} -type jsonto struct { - Elt1 int `json:"element1"` - Elt2 string `json:"element2"` - Elt3 int -} - -func TestInParallel(t *testing.T) { - size := 5 - booleanMap := make(map[int]bool, size+1) - tasks := InParallel{} - for i := 0; i < size; i++ { - task := func(index int) func() error { - return func() error { - booleanMap[index] = true - return nil - } - }(i) - tasks.Add(task) - } - err := tasks.Wait() - if err != nil { - t.Fatal(err) - } - // Make sure every value is true - for _, value := range booleanMap { - if !value { - t.Fatalf("booleanMap expected to contain only true values, got at least one false") - } - } -} - -func TestInParallelError(t *testing.T) { - size := 5 - booleanMap := make(map[int]bool, size+1) - tasks := InParallel{} - for i := 0; i < size; i++ { - task := func(index int) func() error { - return func() error { - booleanMap[index] = true - if index%2 == 0 { - return fmt.Errorf("Error with %v", index) - } - return nil - } - }(i) - tasks.Add(task) - } - err := tasks.Wait() - if err == nil { - t.Fatalf("Expected an error on Wait, got nothing.") - } - for key, value := range booleanMap { - if key%2 != 0 && !value { - t.Fatalf("booleanMap expected to contain true values on odd number, got %v", booleanMap) - } - } -} - -func TestConvertByJSON(t *testing.T) { - valids := []struct { - src jsonfrom - expected jsonto - }{ - { - jsonfrom{Element2: 1}, - jsonto{1, "", 0}, - }, - { - jsonfrom{}, - jsonto{0, "", 0}, - }, - { - jsonfrom{"element1", 2}, - jsonto{2, "element1", 0}, - }, - } - for _, valid := range valids { - var target jsonto - err := ConvertByJSON(valid.src, &target) - if err != nil || target.Elt1 != valid.expected.Elt1 || target.Elt2 != valid.expected.Elt2 || target.Elt3 != 0 { - t.Fatalf("Expected %v from %v got %v, %v", valid.expected, valid.src, target, err) - } - } -} - -func TestConvertByJSONInvalid(t *testing.T) { - invalids 
:= []interface{}{ - // Incompatible struct - struct { - Element1 int `json:"element2"` - Element2 string `json:"element1"` - }{1, "element1"}, - // Not marshable struct - struct { - Element1 func(int) int - }{ - func(i int) int { return 0 }, - }, - } - for _, invalid := range invalids { - var target jsonto - if err := ConvertByJSON(invalid, &target); err == nil { - t.Fatalf("Expected an error converting %v to %v, got nothing", invalid, target) - } - } -} - -type yamlfrom struct { - Element1 string `yaml:"element2"` - Element2 int `yaml:"element1"` -} -type yamlto struct { - Elt1 int `yaml:"element1"` - Elt2 string `yaml:"element2"` - Elt3 int -} - -func TestConvert(t *testing.T) { - valids := []struct { - src yamlfrom - expected yamlto - }{ - { - yamlfrom{Element2: 1}, - yamlto{1, "", 0}, - }, - { - yamlfrom{}, - yamlto{0, "", 0}, - }, - { - yamlfrom{"element1", 2}, - yamlto{2, "element1", 0}, - }, - } - for _, valid := range valids { - var target yamlto - err := Convert(valid.src, &target) - if err != nil || target.Elt1 != valid.expected.Elt1 || target.Elt2 != valid.expected.Elt2 || target.Elt3 != 0 { - t.Fatalf("Expected %v from %v got %v, %v", valid.expected, valid.src, target, err) - } - } -} - -func TestConvertInvalid(t *testing.T) { - invalids := []interface{}{ - // Incompatible struct - struct { - Element1 int `yaml:"element2"` - Element2 string `yaml:"element1"` - }{1, "element1"}, - // Not marshable struct - // This one panics :-| - // struct { - // Element1 func(int) int - // }{ - // func(i int) int { return 0 }, - // }, - } - for _, invalid := range invalids { - var target yamlto - if err := Convert(invalid, &target); err == nil { - t.Fatalf("Expected an error converting %v to %v, got nothing", invalid, target) - } - } -} - -func TestFilterStringSet(t *testing.T) { - s := map[string]bool{"abcd": true, "b": true, "cde": true, "d": true, "ef": true} - expected := map[string]bool{"abcd": true, "cde": true} - result := FilterStringSet(s, func(x string) bool { return len(x) > 2 }) - assert.Equal(t, expected, result) -} - -func TestFilterString(t *testing.T) { - datas := []struct { - value map[string][]string - expected string - }{ - { - map[string][]string{}, - "{}", - }, - { - map[string][]string{ - "key": {}, - }, - `{"key":[]}`, - }, - { - map[string][]string{ - "key": {"value1", "value2"}, - }, - `{"key":["value1","value2"]}`, - }, - { - map[string][]string{ - "key1": {"value1", "value2"}, - "key2": {"value3", "value4"}, - }, - `{"key1":["value1","value2"],"key2":["value3","value4"]}`, - }, - } - for _, data := range datas { - actual := FilterString(data.value) - if actual != data.expected { - t.Fatalf("Expected '%v' for %v, got '%v'", data.expected, data.value, actual) - } - } -} - -func TestLabelFilter(t *testing.T) { - filters := []struct { - key string - value string - expected string - }{ - { - "key", "value", `{"label":["key=value"]}`, - }, { - "key", "", `{"label":["key="]}`, - }, { - "", "", `{"label":["="]}`, - }, - } - for _, filter := range filters { - actual := LabelFilterString(filter.key, filter.value) - if actual != filter.expected { - t.Fatalf("Expected '%s for key=%s and value=%s, got %s", filter.expected, filter.key, filter.value, actual) - } - } -} - -func TestContains(t *testing.T) { - cases := []struct { - collection []string - key string - contains bool - }{ - { - []string{}, "", false, - }, - { - []string{""}, "", true, - }, - { - []string{"value1", "value2"}, "value3", false, - }, - { - []string{"value1", "value2"}, "value1", true, - }, - { - 
[]string{"value1", "value2"}, "value2", true, - }, - } - for _, element := range cases { - actual := Contains(element.collection, element.key) - if actual != element.contains { - t.Fatalf("Expected contains to be %v for %v in %v, but was %v", element.contains, element.key, element.collection, actual) - } - } -} - -func TestMerge(t *testing.T) { - a := []string{"a", "b", "c"} - b := []string{"b", "c", "d", "e"} - expected := []string{"a", "b", "c", "d", "e"} - r := Merge(a, b) - assert.Equal(t, len(expected), len(r)) - for _, v := range expected { - assert.True(t, Contains(r, v)) - } - assert.Equal(t, "a:b", fmt.Sprint("a", ":", "b")) -} diff --git a/vendor/github.com/docker/libcompose/version/version.go b/vendor/github.com/docker/libcompose/version/version.go new file mode 100644 index 00000000..c1af66d4 --- /dev/null +++ b/vendor/github.com/docker/libcompose/version/version.go @@ -0,0 +1,12 @@ +package version + +var ( + // VERSION should be updated by hand at each release + VERSION = "0.3.0-dev" + + // GITCOMMIT will be overwritten automatically by the build system + GITCOMMIT = "HEAD" + + // BUILDTIME will be overwritten automatically by the build system + BUILDTIME = "" +) diff --git a/vendor/github.com/docker/libcompose/yaml/types_yaml.go b/vendor/github.com/docker/libcompose/yaml/types_yaml.go new file mode 100644 index 00000000..c1fdc103 --- /dev/null +++ b/vendor/github.com/docker/libcompose/yaml/types_yaml.go @@ -0,0 +1,288 @@ +package yaml + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/docker/engine-api/types/strslice" + "github.com/flynn/go-shlex" +) + +// Stringorslice represents a string or an array of strings. +// Using engine-api Strslice and augment it with YAML marshalling stuff. +type Stringorslice strslice.StrSlice + +// UnmarshalYAML implements the Unmarshaller interface. +func (s *Stringorslice) UnmarshalYAML(tag string, value interface{}) error { + switch value := value.(type) { + case []interface{}: + parts, err := toStrings(value) + if err != nil { + return err + } + *s = parts + case string: + *s = []string{value} + default: + return fmt.Errorf("Failed to unmarshal Stringorslice: %#v", value) + } + return nil +} + +// Ulimits represents a list of Ulimit. +// It is, however, represented in yaml as keys (and thus map in Go) +type Ulimits struct { + Elements []Ulimit +} + +// MarshalYAML implements the Marshaller interface. +func (u Ulimits) MarshalYAML() (tag string, value interface{}, err error) { + ulimitMap := make(map[string]Ulimit) + for _, ulimit := range u.Elements { + ulimitMap[ulimit.Name] = ulimit + } + return "", ulimitMap, nil +} + +// UnmarshalYAML implements the Unmarshaller interface. 
+func (u *Ulimits) UnmarshalYAML(tag string, value interface{}) error { + ulimits := make(map[string]Ulimit) + yamlUlimits := reflect.ValueOf(value) + switch yamlUlimits.Kind() { + case reflect.Map: + for _, key := range yamlUlimits.MapKeys() { + var name string + var soft, hard int64 + mapValue := yamlUlimits.MapIndex(key).Elem() + name = key.Elem().String() + switch mapValue.Kind() { + case reflect.Int64: + soft = mapValue.Int() + hard = mapValue.Int() + case reflect.Map: + if len(mapValue.MapKeys()) != 2 { + return fmt.Errorf("Failed to unmarshal Ulimit: %#v", mapValue) + } + for _, subKey := range mapValue.MapKeys() { + subValue := mapValue.MapIndex(subKey).Elem() + switch subKey.Elem().String() { + case "soft": + soft = subValue.Int() + case "hard": + hard = subValue.Int() + } + } + default: + return fmt.Errorf("Failed to unmarshal Ulimit: %#v, %v", mapValue, mapValue.Kind()) + } + ulimits[name] = Ulimit{ + Name: name, + ulimitValues: ulimitValues{ + Soft: soft, + Hard: hard, + }, + } + } + keys := make([]string, 0, len(ulimits)) + for key := range ulimits { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + u.Elements = append(u.Elements, ulimits[key]) + } + default: + return fmt.Errorf("Failed to unmarshal Ulimit: %#v", value) + } + return nil +} + +// Ulimit represents ulimit information. +type Ulimit struct { + ulimitValues + Name string +} + +type ulimitValues struct { + Soft int64 `yaml:"soft"` + Hard int64 `yaml:"hard"` +} + +// MarshalYAML implements the Marshaller interface. +func (u Ulimit) MarshalYAML() (tag string, value interface{}, err error) { + if u.Soft == u.Hard { + return "", u.Soft, nil + } + return "", u.ulimitValues, err +} + +// NewUlimit creates a Ulimit based on the specified parts. +func NewUlimit(name string, soft int64, hard int64) Ulimit { + return Ulimit{ + Name: name, + ulimitValues: ulimitValues{ + Soft: soft, + Hard: hard, + }, + } +} + +// Command represents a docker command, can be a string or an array of strings. +type Command strslice.StrSlice + +// UnmarshalYAML implements the Unmarshaller interface. +func (s *Command) UnmarshalYAML(tag string, value interface{}) error { + switch value := value.(type) { + case []interface{}: + parts, err := toStrings(value) + if err != nil { + return err + } + *s = parts + case string: + parts, err := shlex.Split(value) + if err != nil { + return err + } + *s = parts + default: + return fmt.Errorf("Failed to unmarshal Command: %#v", value) + } + return nil +} + +// SliceorMap represents a slice or a map of strings. +type SliceorMap map[string]string + +// UnmarshalYAML implements the Unmarshaller interface. 
+func (s *SliceorMap) UnmarshalYAML(tag string, value interface{}) error { + switch value := value.(type) { + case map[interface{}]interface{}: + parts := map[string]string{} + for k, v := range value { + if sk, ok := k.(string); ok { + if sv, ok := v.(string); ok { + parts[sk] = sv + } else { + return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) + } + } else { + return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k) + } + } + *s = parts + case []interface{}: + parts := map[string]string{} + for _, s := range value { + if str, ok := s.(string); ok { + str := strings.TrimSpace(str) + keyValueSlice := strings.SplitN(str, "=", 2) + + key := keyValueSlice[0] + val := "" + if len(keyValueSlice) == 2 { + val = keyValueSlice[1] + } + parts[key] = val + } else { + return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", s, s) + } + } + *s = parts + default: + return fmt.Errorf("Failed to unmarshal SliceorMap: %#v", value) + } + return nil +} + +// MaporEqualSlice represents a slice of strings that gets unmarshal from a +// YAML map into 'key=value' string. +type MaporEqualSlice []string + +// UnmarshalYAML implements the Unmarshaller interface. +func (s *MaporEqualSlice) UnmarshalYAML(tag string, value interface{}) error { + parts, err := unmarshalToStringOrSepMapParts(value, "=") + if err != nil { + return err + } + *s = parts + return nil +} + +// MaporColonSlice represents a slice of strings that gets unmarshal from a +// YAML map into 'key:value' string. +type MaporColonSlice []string + +// UnmarshalYAML implements the Unmarshaller interface. +func (s *MaporColonSlice) UnmarshalYAML(tag string, value interface{}) error { + parts, err := unmarshalToStringOrSepMapParts(value, ":") + if err != nil { + return err + } + *s = parts + return nil +} + +// MaporSpaceSlice represents a slice of strings that gets unmarshal from a +// YAML map into 'key value' string. +type MaporSpaceSlice []string + +// UnmarshalYAML implements the Unmarshaller interface. 
+func (s *MaporSpaceSlice) UnmarshalYAML(tag string, value interface{}) error { + parts, err := unmarshalToStringOrSepMapParts(value, " ") + if err != nil { + return err + } + *s = parts + return nil +} + +func unmarshalToStringOrSepMapParts(value interface{}, key string) ([]string, error) { + switch value := value.(type) { + case []interface{}: + return toStrings(value) + case map[interface{}]interface{}: + return toSepMapParts(value, key) + default: + return nil, fmt.Errorf("Failed to unmarshal Map or Slice: %#v", value) + } +} + +func toSepMapParts(value map[interface{}]interface{}, sep string) ([]string, error) { + if len(value) == 0 { + return nil, nil + } + parts := make([]string, 0, len(value)) + for k, v := range value { + if sk, ok := k.(string); ok { + if sv, ok := v.(string); ok { + parts = append(parts, sk+sep+sv) + } else if sv, ok := v.(int64); ok { + parts = append(parts, sk+sep+strconv.FormatInt(sv, 10)) + } else { + return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) + } + } else { + return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k) + } + } + return parts, nil +} + +func toStrings(s []interface{}) ([]string, error) { + if len(s) == 0 { + return nil, nil + } + r := make([]string, len(s)) + for k, v := range s { + if sv, ok := v.(string); ok { + r[k] = sv + } else { + return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) + } + } + return r, nil +} diff --git a/vendor/github.com/docker/libcontainer/.gitignore b/vendor/github.com/docker/libcontainer/.gitignore deleted file mode 100644 index 2e3f79b4..00000000 --- a/vendor/github.com/docker/libcontainer/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -bundles -nsinit/nsinit -vendor/pkg diff --git a/vendor/github.com/docker/libcontainer/CONTRIBUTING.md b/vendor/github.com/docker/libcontainer/CONTRIBUTING.md deleted file mode 100644 index 667cc5a6..00000000 --- a/vendor/github.com/docker/libcontainer/CONTRIBUTING.md +++ /dev/null @@ -1,257 +0,0 @@ -# The libcontainer Contributors' Guide - -Want to hack on libcontainer? Awesome! Here are instructions to get you -started. They are probably not perfect, please let us know if anything -feels wrong or incomplete. - -## Reporting Issues - -When reporting [issues](https://github.com/docker/libcontainer/issues) -on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), -the output of `uname -a`. Please include the steps required to reproduce -the problem if possible and applicable. -This information will help us review and fix your issue faster. - -## Development Environment - -### Requirements - -For best results, use a Linux development environment. -The following packages are required to compile libcontainer natively. - -- Golang 1.3 -- GCC -- git -- cgutils - -You can develop on OSX, but you are limited to Dockerfile-based builds only. - -### Building libcontainer from Dockerfile - - make all - -This is the easiest way of building libcontainer. -As this build is done using Docker, you can even run this from [OSX](https://github.com/boot2docker/boot2docker) - -### Testing changes with "nsinit" - - make sh - -This will create an container that runs `nsinit exec sh` on a busybox rootfs with the configuration from ['minimal.json'](https://github.com/docker/libcontainer/blob/master/sample_configs/minimal.json). -Like the previous command, you can run this on OSX too! 
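Returning briefly to the new `vendor/github.com/docker/libcompose/yaml` package added above: its flexible types are easiest to understand from the consumer side. Below is a minimal sketch, assuming the vendored candiedyaml parser; the `service` struct is illustrative only, not the real `ServiceConfig`.

```go
package main

import (
	"fmt"

	yaml "github.com/cloudfoundry-incubator/candiedyaml"
	composeYaml "github.com/docker/libcompose/yaml"
)

// service is a hypothetical struct that just exercises the flexible types.
type service struct {
	Command     composeYaml.Command         `yaml:"command,flow,omitempty"`
	DNS         composeYaml.Stringorslice   `yaml:"dns,omitempty"`
	Environment composeYaml.MaporEqualSlice `yaml:"environment,omitempty"`
}

func main() {
	// Each field tolerates more than one YAML shape: a command given as a
	// single string is shlex-split, a scalar dns entry becomes a
	// one-element slice, and an environment map is flattened into
	// "key=value" strings.
	data := []byte(`
command: /bin/sh -c 'echo hello'
dns: 8.8.8.8
environment:
  DAEMON: "true"
`)
	var s service
	if err := yaml.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Command)     // [/bin/sh -c echo hello]
	fmt.Println(s.DNS)         // [8.8.8.8]
	fmt.Println(s.Environment) // [DAEMON=true]
}
```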
- -### Building libcontainer directly - -> Note: You should add the `vendor` directory to your GOPATH to use the vendored libraries - - ./update-vendor.sh - go get -d ./... - make direct-build - # Run the tests - make direct-test-short | egrep --color 'FAIL|$' - # Run all the test - make direct-test | egrep --color 'FAIL|$' - -### Testing Changes with "nsinit" directly - -To test a change: - - # Install nsinit - make direct-install - - # Optional, add a docker0 bridge - ip link add docker0 type bridge - ifconfig docker0 172.17.0.1/16 up - - mkdir testfs - curl -sSL https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar | tar -xC testfs - cd testfs - cp container.json - nsinit exec sh - -## Contribution Guidelines - -### Pull requests are always welcome - -We are always thrilled to receive pull requests, and do our best to -process them as fast as possible. Not sure if that typo is worth a pull -request? Do it! We will appreciate it. - -If your pull request is not accepted on the first try, don't be -discouraged! If there's a problem with the implementation, hopefully you -received feedback on what to improve. - -We're trying very hard to keep libcontainer lean and focused. We don't want it -to do everything for everybody. This means that we might decide against -incorporating a new feature. However, there might be a way to implement -that feature *on top of* libcontainer. - -### Discuss your design on the mailing list - -We recommend discussing your plans [on the mailing -list](https://groups.google.com/forum/?fromgroups#!forum/libcontainer) -before starting to code - especially for more ambitious contributions. -This gives other contributors a chance to point you in the right -direction, give feedback on your design, and maybe point out if someone -else is working on the same thing. - -### Create issues... - -Any significant improvement should be documented as [a GitHub -issue](https://github.com/docker/libcontainer/issues) before anybody -starts working on it. - -### ...but check for existing issues first! - -Please take a moment to check that an issue doesn't already exist -documenting your bug report or improvement proposal. If it does, it -never hurts to add a quick "+1" or "I have this problem too". This will -help prioritize the most common problems and requests. - -### Conventions - -Fork the repo and make changes on your fork in a feature branch: - -- If it's a bugfix branch, name it XXX-something where XXX is the number of the - issue -- If it's a feature branch, create an enhancement issue to announce your - intentions, and name it XXX-something where XXX is the number of the issue. - -Submit unit tests for your changes. Go has a great test framework built in; use -it! Take a look at existing tests for inspiration. Run the full test suite on -your branch before submitting a pull request. - -Update the documentation when creating or modifying features. Test -your documentation changes for clarity, concision, and correctness, as -well as a clean documentation build. See ``docs/README.md`` for more -information on building the docs and how docs get released. - -Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `gofmt -s -w file.go` on each changed file before -committing your changes. Most editors have plugins that do this automatically. - -Pull requests descriptions should be as clear as possible and include a -reference to all the issues that they address. 
-
-Pull requests must not contain commits from other users or branches.
-
-Commit messages must start with a capitalized and short summary (max. 50
-chars) written in the imperative, followed by an optional, more detailed
-explanatory text which is separated from the summary by an empty line.
-
-Code review comments may be added to your pull request. Discuss, then make the
-suggested modifications and push additional commits to your feature branch. Be
-sure to post a comment after pushing. The new commits will show up in the pull
-request automatically, but the reviewers will not be notified unless you
-comment.
-
-Before the pull request is merged, make sure that you squash your commits into
-logical units of work using `git rebase -i` and `git push -f`. After every
-commit the test suite should be passing. Include documentation changes in the
-same commit so that a revert would remove all traces of the feature or fix.
-
-Commits that fix or close an issue should include a reference like `Closes #XXX`
-or `Fixes #XXX`, which will automatically close the issue when merged.
-
-### Testing
-
-Make sure you include suitable tests, preferably unit tests, in your pull request
-and that all the tests pass.
-
-*Instructions for running tests to be added.*
-
-### Merge approval
-
-libcontainer maintainers use LGTM (looks good to me) in comments on the code review
-to indicate acceptance.
-
-A change requires LGTMs from at least two maintainers. One of those must come from
-a maintainer of the component affected. For example, if a change affects `netlink/`
-and `security`, it needs at least one LGTM from a maintainer of each. Maintainers
-only need one LGTM as presumably they LGTM their own change.
-
-For more details see [MAINTAINERS.md](MAINTAINERS.md)
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the
-patch, which certifies that you wrote it or otherwise have the right to
-pass it on as an open-source patch. The rules are pretty simple: if you
-can certify the below (from
-[developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
-    have the right to submit it under the open source license
-    indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
-    of my knowledge, is covered under an appropriate open source
-    license and I have the right under that license to submit that
-    work with modifications, whether created in whole or in part
-    by me, under the same open source license (unless I am
-    permitted to submit under a different license), as indicated
-    in the file; or
-
-(c) The contribution was provided directly to me by some other
-    person who certified (a), (b) or (c) and I have not modified
-    it.
-
-(d) I understand and agree that this project and the contribution
-    are public and that a record of the contribution (including all
-    personal information I submit with it, including my sign-off) is
-    maintained indefinitely and may be redistributed consistent with
-    this project or the open source license(s) involved.
-```
-
-then you just add a line to every git commit message:
-
-    Docker-DCO-1.1-Signed-off-by: Joe Smith (github: github_handle)
-
-using your real name (sorry, no pseudonyms or anonymous contributions).
-
-One way to automate this is to customise your git ``commit.template`` by adding
-a ``prepare-commit-msg`` hook to your libcontainer checkout:
-
-```
-curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/docker/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
-```
-
-* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
-
-#### Small patch exception
-
-There are several exceptions to the signing requirement. Currently these are:
-
-* Your patch fixes spelling or grammar errors.
-* Your patch is a single line change to documentation contained in the
-  `docs` directory.
-* Your patch fixes Markdown formatting or syntax errors in the
-  documentation contained in the `docs` directory.
-
-If you have any questions, please refer to the FAQ in the [docs](to be written)
-
-### How can I become a maintainer?
-
-* Step 1: learn the component inside out
-* Step 2: make yourself useful by contributing code, bugfixes, support etc.
-* Step 3: volunteer on the irc channel (#libcontainer@freenode)
-
-Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
-You don't have to be a maintainer to make a difference on the project!
-
diff --git a/vendor/github.com/docker/libcontainer/Dockerfile b/vendor/github.com/docker/libcontainer/Dockerfile
deleted file mode 100644
index a8854988..00000000
--- a/vendor/github.com/docker/libcontainer/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM golang:1.4
-
-RUN echo "deb http://ftp.us.debian.org/debian testing main contrib" >> /etc/apt/sources.list
-RUN apt-get update && apt-get install -y iptables criu=1.5.2-1 && rm -rf /var/lib/apt/lists/*
-
-RUN go get golang.org/x/tools/cmd/cover
-
-ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor
-RUN go get github.com/docker/docker/pkg/term
-
-# setup a playground for us to spawn containers in
-RUN mkdir /busybox && \
-    curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.11/rootfs.tar' | tar -xC /busybox
-
-RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \
-    chmod +x /dind
-
-COPY . /go/src/github.com/docker/libcontainer
-WORKDIR /go/src/github.com/docker/libcontainer
-RUN cp sample_configs/minimal.json /busybox/container.json
-
-RUN make direct-install
-
-ENTRYPOINT ["/dind"]
-CMD ["make", "direct-test"]
diff --git a/vendor/github.com/docker/libcontainer/MAINTAINERS b/vendor/github.com/docker/libcontainer/MAINTAINERS
deleted file mode 100644
index cea3500f..00000000
--- a/vendor/github.com/docker/libcontainer/MAINTAINERS
+++ /dev/null
@@ -1,7 +0,0 @@
-Michael Crosby (@crosbymichael)
-Rohit Jnagal (@rjnagal)
-Victor Marmol (@vmarmol)
-Mrunal Patel (@mrunalp)
-Alexandr Morozov (@LK4D4)
-Daniel, Dao Quang Minh (@dqminh)
-update-vendor.sh: Tianon Gravi (@tianon)
diff --git a/vendor/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md b/vendor/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md
deleted file mode 100644
index 2ac9ca21..00000000
--- a/vendor/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md
+++ /dev/null
@@ -1,99 +0,0 @@
-# The libcontainer Maintainers' Guide
-
-## Introduction
-
-Dear maintainer. Thank you for investing the time and energy to help
-make libcontainer as useful as possible. Maintaining a project is difficult,
-sometimes unrewarding work. Sure, you will get to contribute cool
-features to the project. But most of your time will be spent reviewing,
-cleaning up, documenting, answering questions, justifying design
-decisions - while everyone has all the fun! But remember - the quality
-of the maintainers' work is what distinguishes the good projects from the
-great. So please be proud of your work, even the unglamorous parts,
-and encourage a culture of appreciation and respect for *every* aspect
-of improving the project - not just the hot new features.
-
-This document is a manual for maintainers old and new. It explains what
-is expected of maintainers, how they should work, and what tools are
-available to them.
-
-This is a living document - if you see something out of date or missing,
-speak up!
-
-## What are a maintainer's responsibilities?
-
-It is every maintainer's responsibility to:
-
-* 1) Expose a clear roadmap for improving their component.
-* 2) Deliver prompt feedback and decisions on pull requests.
-* 3) Be available to anyone with questions, bug reports, criticism etc.
-  on their component. This includes IRC, GitHub requests and the mailing
-  list.
-* 4) Make sure their component respects the philosophy, design and
-  roadmap of the project.
-
-## How are decisions made?
-
-Short answer: with pull requests to the libcontainer repository.
-
-libcontainer is an open-source project with an open design philosophy. This
-means that the repository is the source of truth for EVERY aspect of the
-project, including its philosophy, design, roadmap and APIs. *If it's
-part of the project, it's in the repo. If it's in the repo, it's part of
-the project.*
-
-As a result, all decisions can be expressed as changes to the
-repository. An implementation change is a change to the source code. An
-API change is a change to the API specification. A philosophy change is
-a change to the philosophy manifesto. And so on.
-
-All decisions affecting libcontainer, big and small, follow the same 3 steps:
-
-* Step 1: Open a pull request. Anyone can do this.
-
-* Step 2: Discuss the pull request. Anyone can do this.
-
-* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
-this (see below "Who decides what?")
-
-
-## Who decides what?
-
-All decisions are pull requests, and the relevant maintainers make
-decisions by accepting or refusing the pull request. Review and acceptance
-by anyone is denoted by adding a comment in the pull request: `LGTM`.
-However, only currently listed `MAINTAINERS` are counted towards the required
-two LGTMs.
-
-libcontainer follows the timeless, highly efficient and totally unfair system
-known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Michael Crosby in the role of BDFL.
-This means that all decisions are made by default by Michael. Since making
-every decision himself would be highly un-scalable, in practice decisions
-are spread across multiple maintainers.
-
-The relevant maintainers for a pull request can be worked out in two steps:
-
-* Step 1: Determine the subdirectories affected by the pull request. This
-  might be `netlink/` and `security/`, or any other part of the repo.
-
-* Step 2: Find the `MAINTAINERS` file which affects this directory. If the
-  directory itself does not have a `MAINTAINERS` file, work your way up
-  the repo hierarchy until you find one.
- -### I'm a maintainer, and I'm going on holiday - -Please let your co-maintainers and other contributors know by raising a pull -request that comments out your `MAINTAINERS` file entry using a `#`. - -### I'm a maintainer, should I make pull requests too? - -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. - -### Who assigns maintainers? - -Michael has final `LGTM` approval for all pull requests to `MAINTAINERS` files. - -### How is this process changed? - -Just like everything else: by making a pull request :) diff --git a/vendor/github.com/docker/libcontainer/Makefile b/vendor/github.com/docker/libcontainer/Makefile deleted file mode 100644 index 1a2e23e0..00000000 --- a/vendor/github.com/docker/libcontainer/Makefile +++ /dev/null @@ -1,33 +0,0 @@ - -all: - docker build -t dockercore/libcontainer . - -test: - # we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting - docker run --rm -it --privileged dockercore/libcontainer - -sh: - docker run --rm -it --privileged -w /busybox dockercore/libcontainer nsinit exec sh - -GO_PACKAGES = $(shell find . -not \( -wholename ./vendor -prune -o -wholename ./.git -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u) - -direct-test: - go test $(TEST_TAGS) -cover -v $(GO_PACKAGES) - -direct-test-short: - go test $(TEST_TAGS) -cover -test.short -v $(GO_PACKAGES) - -direct-build: - go build -v $(GO_PACKAGES) - -direct-install: - go install -v $(GO_PACKAGES) - -local: - go test -v - -validate: - hack/validate.sh - -binary: all - docker run --rm --privileged -v $(CURDIR)/bundles:/go/bin dockercore/libcontainer make direct-install diff --git a/vendor/github.com/docker/libcontainer/NOTICE b/vendor/github.com/docker/libcontainer/NOTICE deleted file mode 100644 index dc912987..00000000 --- a/vendor/github.com/docker/libcontainer/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -libcontainer -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/libcontainer/PRINCIPLES.md b/vendor/github.com/docker/libcontainer/PRINCIPLES.md deleted file mode 100644 index 05606421..00000000 --- a/vendor/github.com/docker/libcontainer/PRINCIPLES.md +++ /dev/null @@ -1,19 +0,0 @@ -# libcontainer Principles - -In the design and development of libcontainer we try to follow these principles: - -(Work in progress) - -* Don't try to replace every tool. Instead, be an ingredient to improve them. -* Less code is better. -* Fewer components are better. Do you really need to add one more class? -* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. -* Don't do later what you can do now. "//TODO: refactor" is not acceptable in new code. -* When hesitating between two options, choose the one that is easier to reverse. -* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later. -* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. 
-* The fewer moving parts in a container, the better. -* Don't merge it unless you document it. -* Don't document it unless you can keep it up-to-date. -* Don't merge it unless you test it! -* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/vendor/github.com/docker/libcontainer/README.md b/vendor/github.com/docker/libcontainer/README.md deleted file mode 100644 index 20ae1155..00000000 --- a/vendor/github.com/docker/libcontainer/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# NOTICE - -Please refer to https://github.com/opencontainers/runc/tree/master/libcontainer to make contributions to libcontainer and `runc`. - -For more information about the open container project and foundation please refer to: - -http://blog.docker.com/2015/06/open-container-project-foundation/ diff --git a/vendor/github.com/docker/libcontainer/ROADMAP.md b/vendor/github.com/docker/libcontainer/ROADMAP.md deleted file mode 100644 index f5903535..00000000 --- a/vendor/github.com/docker/libcontainer/ROADMAP.md +++ /dev/null @@ -1,20 +0,0 @@ -# libcontainer: what's next? - -This document is a high-level overview of where we want to take libcontainer next. -It is a curated selection of planned improvements which are either important, difficult, or both. - -For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/libcontainer/issues). - -To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. - -## Broader kernel support - -Our goal is to make libcontainer run everywhere, but currently libcontainer requires Linux version 3.8 or higher. If you’re deploying new machines for the purpose of running libcontainer, this is a fairly easy requirement to meet. However, if you’re adding libcontainer to an existing deployment, you may not have the flexibility to update and patch the kernel. - -## Cross-architecture support - -Our goal is to make libcontainer run everywhere. Recently libcontainer has -expanded from its initial support for x86_64 systems to include POWER (ppc64 -little and big endian variants), IBM System z (s390x 64-bit), and ARM. We plan -to continue expanding architecture support such that libcontainer containers -can be created and used on more architectures. diff --git a/vendor/github.com/docker/libcontainer/SPEC.md b/vendor/github.com/docker/libcontainer/SPEC.md deleted file mode 100644 index 060f07da..00000000 --- a/vendor/github.com/docker/libcontainer/SPEC.md +++ /dev/null @@ -1,334 +0,0 @@ -## Container Specification - v1 - -This is the standard configuration for version 1 containers. It includes -namespaces, standard filesystem setup, a default Linux capability set, and -information about resource reservations. It also has information about any -populated environment settings for the processes running inside a container. - -Along with the configuration of how a container is created the standard also -discusses actions that can be performed on a container to manage and inspect -information about the processes running inside. - -The v1 profile is meant to be able to accommodate the majority of applications -with a strong security configuration. 
-
-### System Requirements and Compatibility
-
-Minimum requirements:
-* Kernel version - 3.10 recommended; 2.6.2x minimum (with backported patches)
-* Mounted cgroups with each subsystem in its own hierarchy
-
-
-### Namespaces
-
-| Flag | Enabled |
-| ------------ | ------- |
-| CLONE_NEWPID | 1 |
-| CLONE_NEWUTS | 1 |
-| CLONE_NEWIPC | 1 |
-| CLONE_NEWNET | 1 |
-| CLONE_NEWNS | 1 |
-| CLONE_NEWUSER | 1 |
-
-Namespaces are created for the container via the `clone` syscall.
-
-
-### Filesystem
-
-A root filesystem must be provided to a container for execution. The container
-will use this root filesystem (rootfs) to jail and spawn processes inside where
-the binaries and system libraries are local to that directory. Any binaries
-to be executed must be contained within this rootfs.
-
-Mounts that happen inside the container are automatically cleaned up when the
-container exits as the mount namespace is destroyed and the kernel will
-unmount all the mounts that were setup within that namespace.
-
-For a container to execute properly there are certain filesystems that
-are required to be mounted within the rootfs that the runtime will setup.
-
-| Path | Type | Flags | Data |
-| ----------- | ------ | -------------------------------------- | ---------------------------------------- |
-| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
-| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 |
-| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k |
-| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
-| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 |
-| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | |
-
-
-After a container's filesystems are mounted within the newly created
-mount namespace `/dev` will need to be populated with a set of device nodes.
-It is expected that a rootfs does not need to have any device nodes specified
-for `/dev` within the rootfs as the container will setup the correct devices
-that are required for executing a container's process.
-
-| Path | Mode | Access |
-| ------------ | ---- | ---------- |
-| /dev/null | 0666 | rwm |
-| /dev/zero | 0666 | rwm |
-| /dev/full | 0666 | rwm |
-| /dev/tty | 0666 | rwm |
-| /dev/random | 0666 | rwm |
-| /dev/urandom | 0666 | rwm |
-| /dev/fuse | 0666 | rwm |
-
-
-**ptmx**
-`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within
-the container.
-
-The use of a pseudo TTY is optional within a container and a container should
-support both cases. If a pseudo TTY is provided to the container, `/dev/console`
-will need to be setup by binding the console in `/dev/` after it has been
-populated and mounted in tmpfs.
-
-| Source | Destination | UID GID | Mode | Type |
-| --------------- | ------------ | ------- | ---- | ---- |
-| *pty host path* | /dev/console | 0 0 | 0600 | bind |
-
-
-After `/dev/null` has been setup we check for any external links between
-the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing
-to `/dev/null` outside the container we close and `dup2` the `/dev/null`
-that is local to the container's rootfs.
-
-
-After the container has `/proc` mounted a few standard symlinks are setup
-within `/dev/` for the io.
-
-| Source | Destination |
-| ------------ | ----------- |
-| /proc/1/fd | /dev/fd |
-| /proc/1/fd/0 | /dev/stdin |
-| /proc/1/fd/1 | /dev/stdout |
-| /proc/1/fd/2 | /dev/stderr |
-
-A `pivot_root` is used to change the root for the process, effectively
-jailing the process inside the rootfs.
-
-```c
-put_old = mkdir(...);
-pivot_root(rootfs, put_old);
-chdir("/");
-umount(put_old, MNT_DETACH);
-rmdir(put_old);
-```
-
-For containers running with a rootfs inside `ramfs` a `MS_MOVE` combined
-with a `chroot` is required as `pivot_root` is not supported in `ramfs`.
-
-```c
-mount(rootfs, "/", NULL, MS_MOVE, NULL);
-chroot(".");
-chdir("/");
-```
-
-The `umask` is set back to `0022` after the filesystem setup has been completed.
-
-### Resources
-
-Cgroups are used to handle resource allocation for containers. This includes
-system resources like cpu, memory, and device access.
-
-| Subsystem | Enabled |
-| ---------- | ------- |
-| devices | 1 |
-| memory | 1 |
-| cpu | 1 |
-| cpuacct | 1 |
-| cpuset | 1 |
-| blkio | 1 |
-| perf_event | 1 |
-| freezer | 1 |
-| hugetlb | 1 |
-
-
-All cgroup subsystems are joined so that statistics can be collected from
-each of the subsystems. Freezer does not expose any stats but is joined
-so that containers can be paused and resumed.
-
-The parent process of the container's init must place the init pid inside
-the correct cgroups before the initialization begins. This is done so
-that no processes or threads escape the cgroups. This sync is done via
-a pipe (specified in the runtime section below) on which the container's
-init process will block, waiting for the parent to finish setup.
-
-### Security
-
-The standard set of Linux capabilities that are set in a container
-provide a good default for security and flexibility for the applications.
-
-| Capability | Enabled |
-| -------------------- | ------- |
-| CAP_NET_RAW | 1 |
-| CAP_NET_BIND_SERVICE | 1 |
-| CAP_AUDIT_READ | 1 |
-| CAP_AUDIT_WRITE | 1 |
-| CAP_DAC_OVERRIDE | 1 |
-| CAP_SETFCAP | 1 |
-| CAP_SETPCAP | 1 |
-| CAP_SETGID | 1 |
-| CAP_SETUID | 1 |
-| CAP_MKNOD | 1 |
-| CAP_CHOWN | 1 |
-| CAP_FOWNER | 1 |
-| CAP_FSETID | 1 |
-| CAP_KILL | 1 |
-| CAP_SYS_CHROOT | 1 |
-| CAP_NET_BROADCAST | 0 |
-| CAP_SYS_MODULE | 0 |
-| CAP_SYS_RAWIO | 0 |
-| CAP_SYS_PACCT | 0 |
-| CAP_SYS_ADMIN | 0 |
-| CAP_SYS_NICE | 0 |
-| CAP_SYS_RESOURCE | 0 |
-| CAP_SYS_TIME | 0 |
-| CAP_SYS_TTY_CONFIG | 0 |
-| CAP_AUDIT_CONTROL | 0 |
-| CAP_MAC_OVERRIDE | 0 |
-| CAP_MAC_ADMIN | 0 |
-| CAP_NET_ADMIN | 0 |
-| CAP_SYSLOG | 0 |
-| CAP_DAC_READ_SEARCH | 0 |
-| CAP_LINUX_IMMUTABLE | 0 |
-| CAP_IPC_LOCK | 0 |
-| CAP_IPC_OWNER | 0 |
-| CAP_SYS_PTRACE | 0 |
-| CAP_SYS_BOOT | 0 |
-| CAP_LEASE | 0 |
-| CAP_WAKE_ALARM | 0 |
-| CAP_BLOCK_SUSPEND | 0 |
-
-
-Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)
-and [selinux](http://selinuxproject.org/page/Main_Page) can be used with
-the containers. A container should support setting an apparmor profile or
-selinux process and mount labels if provided in the configuration.
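The paragraph above only says that a profile should be applied when one is configured. As a rough, hypothetical sketch of what that involves (not libcontainer's actual code), a runtime can arm an AppArmor transition by writing `exec <profile>` to the process's attr file, so the kernel switches profiles at the next `execve`:

```go
package apparmor

import "io/ioutil"

// applyProfile asks the kernel to switch to the named AppArmor profile at
// the next execve. A sketch: real code should first verify that AppArmor
// is actually enabled on the host before attempting the write.
func applyProfile(name string) error {
	if name == "" {
		return nil // no profile requested; keep the current confinement
	}
	// "exec <profile>" requests a profile transition on the next execve.
	return ioutil.WriteFile("/proc/self/attr/exec", []byte("exec "+name), 0)
}
```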
-
-Standard apparmor profile:
-```c
-#include <tunables/global>
-profile flags=(attach_disconnected,mediate_deleted) {
-  #include <abstractions/base>
-  network,
-  capability,
-  file,
-  umount,
-
-  deny @{PROC}/sys/fs/** wklx,
-  deny @{PROC}/sysrq-trigger rwklx,
-  deny @{PROC}/mem rwklx,
-  deny @{PROC}/kmem rwklx,
-  deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
-  deny @{PROC}/sys/kernel/*/** wklx,
-
-  deny mount,
-
-  deny /sys/[^f]*/** wklx,
-  deny /sys/f[^s]*/** wklx,
-  deny /sys/fs/[^c]*/** wklx,
-  deny /sys/fs/c[^g]*/** wklx,
-  deny /sys/fs/cg[^r]*/** wklx,
-  deny /sys/firmware/efi/efivars/** rwklx,
-  deny /sys/kernel/security/** rwklx,
-}
-```
-
-*TODO: seccomp work is being done to find a good default config*
-
-### Runtime and Init Process
-
-During container creation the parent process needs to talk to the container's
-init process and have a form of synchronization. This is accomplished by
-creating a pipe that is passed to the container's init. When the init process
-first spawns it will block on its side of the pipe until the parent closes
-its side. This gives the parent time to place the new process inside a cgroup
-hierarchy and/or write any uid/gid mappings required for user namespaces.
-The pipe is passed to the init process via FD 3.
-
-The application consuming libcontainer should be compiled statically.
-libcontainer does not define any init process, and the arguments provided
-are used to `exec` the process inside the application. There should be no
-long-running init within the container spec.
-
-If a pseudo TTY is provided to a container, it will open and `dup2` the
-console as the container's STDIN, STDOUT, and STDERR, as well as mount the
-console as `/dev/console`.
-
-An extra set of mounts is provided to a container and set up for use. A
-container's rootfs can contain some non-portable files that can cause side
-effects during execution of a process. These files are usually created and
-populated with the container-specific information by the runtime.
-
-**Extra runtime files:**
-* /etc/hosts
-* /etc/resolv.conf
-* /etc/hostname
-* /etc/localtime
-
-
-#### Defaults
-
-There are a few defaults that can be overridden by users, but in their
-absence these apply to processes within a container.
-
-| Type                | Value                          |
-| ------------------- | ------------------------------ |
-| Parent Death Signal | SIGKILL                        |
-| UID                 | 0                              |
-| GID                 | 0                              |
-| GROUPS              | 0, NULL                        |
-| CWD                 | "/"                            |
-| $HOME               | Current user's home dir or "/" |
-| Readonly rootfs     | false                          |
-| Pseudo TTY          | false                          |
-
-
-## Actions
-
-After a container is created there is a standard set of actions that can
-be performed on the container. These actions are part of the public API for
-a container.
-
-| Action         | Description                                                         |
-| -------------- | ------------------------------------------------------------------- |
-| Get processes  | Return all the pids for processes running inside a container        |
-| Get Stats      | Return resource statistics for the container as a whole             |
-| Wait           | Wait on the container's init process (pid 1)                        |
-| Wait Process   | Wait on any of the container's processes, returning the exit status |
-| Destroy        | Kill the container's init process and remove any filesystem state   |
-| Signal         | Send a signal to the container's init process                       |
-| Signal Process | Send a signal to any of the container's processes                   |
-| Pause          | Pause all processes inside the container                            |
-| Resume         | Resume all processes inside the container if paused                 |
-| Exec           | Execute a new process inside of the container (requires setns)      |
-| Set            | Set up configs of the container after it is created                 |
-
-### Execute a new process inside of a running container
-
-Users can execute a new process inside a running container. Any binaries to be
-executed must be accessible within the container's rootfs.
-
-The started process will run inside the container's rootfs. Any changes
-made by the process to the container's filesystem will persist after the
-process finishes executing.
-
-The started process will join all the container's existing namespaces. When the
-container is paused, the process will also be paused and will resume when
-the container is unpaused. The started process will only run when the container's
-primary process (PID 1) is running, and will not be restarted when the container
-is restarted.
-
-#### Planned additions
-
-The started process will have its own cgroups nested inside the container's
-cgroups. This is used for process tracking and, optionally, resource
-allocation handling for the new process. The freezer cgroup is required;
-the rest of the cgroups are optional. The process executor must place its
-pid inside the correct cgroups before starting the process. This is done
-so that no child processes or threads can escape the cgroups.
-
-When the process is stopped, the process executor will try (in a best-effort
-way) to stop all its children and remove the sub-cgroups.
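
The Actions above map onto the `Factory`, `Container`, and `Process` types in the vendored sources removed below. A minimal, illustrative driver might look like the sketch that follows; the state-directory path, container id, and the heavily abbreviated config are placeholders, and a real config must describe the namespaces, cgroups, mounts, and rootfs covered in the earlier sections:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/libcontainer"
	"github.com/docker/libcontainer/configs"
)

func main() {
	// Create a factory rooted at a state directory, using the plain
	// cgroupfs manager (SystemdCgroups is the alternative shown below).
	factory, err := libcontainer.New("/var/lib/example", libcontainer.Cgroupfs)
	if err != nil {
		log.Fatal(err)
	}

	// Heavily abbreviated: a real config also needs namespaces, cgroups,
	// mounts, devices, and capabilities as described in this spec.
	config := &configs.Config{Rootfs: "/path/to/rootfs"}

	container, err := factory.Create("example1", config) // "Create"
	if err != nil {
		log.Fatal(err)
	}

	process := &libcontainer.Process{
		Args:   []string{"/bin/true"},
		Env:    []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"},
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	if err := container.Start(process); err != nil { // start as init
		log.Fatal(err)
	}
	if _, err := process.Wait(); err != nil { // "Wait Process"
		log.Fatal(err)
	}
	if err := container.Destroy(); err != nil { // "Destroy"
		log.Fatal(err)
	}
}
```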
diff --git a/vendor/github.com/docker/libcontainer/capabilities_linux.go b/vendor/github.com/docker/libcontainer/capabilities_linux.go deleted file mode 100644 index 6b8b465c..00000000 --- a/vendor/github.com/docker/libcontainer/capabilities_linux.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - - "github.com/syndtr/gocapability/capability" -) - -const allCapabilityTypes = capability.CAPS | capability.BOUNDS - -var capabilityList = map[string]capability.Cap{ - "SETPCAP": capability.CAP_SETPCAP, - "SYS_MODULE": capability.CAP_SYS_MODULE, - "SYS_RAWIO": capability.CAP_SYS_RAWIO, - "SYS_PACCT": capability.CAP_SYS_PACCT, - "SYS_ADMIN": capability.CAP_SYS_ADMIN, - "SYS_NICE": capability.CAP_SYS_NICE, - "SYS_RESOURCE": capability.CAP_SYS_RESOURCE, - "SYS_TIME": capability.CAP_SYS_TIME, - "SYS_TTY_CONFIG": capability.CAP_SYS_TTY_CONFIG, - "MKNOD": capability.CAP_MKNOD, - "AUDIT_WRITE": capability.CAP_AUDIT_WRITE, - "AUDIT_CONTROL": capability.CAP_AUDIT_CONTROL, - "MAC_OVERRIDE": capability.CAP_MAC_OVERRIDE, - "MAC_ADMIN": capability.CAP_MAC_ADMIN, - "NET_ADMIN": capability.CAP_NET_ADMIN, - "SYSLOG": capability.CAP_SYSLOG, - "CHOWN": capability.CAP_CHOWN, - "NET_RAW": capability.CAP_NET_RAW, - "DAC_OVERRIDE": capability.CAP_DAC_OVERRIDE, - "FOWNER": capability.CAP_FOWNER, - "DAC_READ_SEARCH": capability.CAP_DAC_READ_SEARCH, - "FSETID": capability.CAP_FSETID, - "KILL": capability.CAP_KILL, - "SETGID": capability.CAP_SETGID, - "SETUID": capability.CAP_SETUID, - "LINUX_IMMUTABLE": capability.CAP_LINUX_IMMUTABLE, - "NET_BIND_SERVICE": capability.CAP_NET_BIND_SERVICE, - "NET_BROADCAST": capability.CAP_NET_BROADCAST, - "IPC_LOCK": capability.CAP_IPC_LOCK, - "IPC_OWNER": capability.CAP_IPC_OWNER, - "SYS_CHROOT": capability.CAP_SYS_CHROOT, - "SYS_PTRACE": capability.CAP_SYS_PTRACE, - "SYS_BOOT": capability.CAP_SYS_BOOT, - "LEASE": capability.CAP_LEASE, - "SETFCAP": capability.CAP_SETFCAP, - "WAKE_ALARM": capability.CAP_WAKE_ALARM, - "BLOCK_SUSPEND": capability.CAP_BLOCK_SUSPEND, - "AUDIT_READ": capability.CAP_AUDIT_READ, -} - -func newCapWhitelist(caps []string) (*whitelist, error) { - l := []capability.Cap{} - for _, c := range caps { - v, ok := capabilityList[c] - if !ok { - return nil, fmt.Errorf("unknown capability %q", c) - } - l = append(l, v) - } - pid, err := capability.NewPid(os.Getpid()) - if err != nil { - return nil, err - } - return &whitelist{ - keep: l, - pid: pid, - }, nil -} - -type whitelist struct { - pid capability.Capabilities - keep []capability.Cap -} - -// dropBoundingSet drops the capability bounding set to those specified in the whitelist. -func (w *whitelist) dropBoundingSet() error { - w.pid.Clear(capability.BOUNDS) - w.pid.Set(capability.BOUNDS, w.keep...) - return w.pid.Apply(capability.BOUNDS) -} - -// drop drops all capabilities for the current process except those specified in the whitelist. -func (w *whitelist) drop() error { - w.pid.Clear(allCapabilityTypes) - w.pid.Set(allCapabilityTypes, w.keep...) - return w.pid.Apply(allCapabilityTypes) -} diff --git a/vendor/github.com/docker/libcontainer/console.go b/vendor/github.com/docker/libcontainer/console.go deleted file mode 100644 index 042a2a2e..00000000 --- a/vendor/github.com/docker/libcontainer/console.go +++ /dev/null @@ -1,15 +0,0 @@ -package libcontainer - -import "io" - -// Console represents a pseudo TTY. -type Console interface { - io.ReadWriter - io.Closer - - // Path returns the filesystem path to the slave side of the pty. 
- Path() string - - // Fd returns the fd for the master of the pty. - Fd() uintptr -} diff --git a/vendor/github.com/docker/libcontainer/console_freebsd.go b/vendor/github.com/docker/libcontainer/console_freebsd.go deleted file mode 100644 index 4d20b8da..00000000 --- a/vendor/github.com/docker/libcontainer/console_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build freebsd - -package libcontainer - -import ( - "errors" -) - -// newConsole returns an initalized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func newConsole(uid, gid int) (Console, error) { - return nil, errors.New("libcontainer console is not supported on FreeBSD") -} diff --git a/vendor/github.com/docker/libcontainer/console_linux.go b/vendor/github.com/docker/libcontainer/console_linux.go deleted file mode 100644 index e35ac529..00000000 --- a/vendor/github.com/docker/libcontainer/console_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -package libcontainer - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "unsafe" - - "github.com/docker/libcontainer/label" -) - -// newConsole returns an initalized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func newConsole(uid, gid int) (Console, error) { - master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - console, err := ptsname(master) - if err != nil { - return nil, err - } - if err := unlockpt(master); err != nil { - return nil, err - } - if err := os.Chmod(console, 0600); err != nil { - return nil, err - } - if err := os.Chown(console, uid, gid); err != nil { - return nil, err - } - return &linuxConsole{ - slavePath: console, - master: master, - }, nil -} - -// newConsoleFromPath is an internal function returning an initialized console for use inside -// a container's MNT namespace. -func newConsoleFromPath(slavePath string) *linuxConsole { - return &linuxConsole{ - slavePath: slavePath, - } -} - -// linuxConsole is a linux psuedo TTY for use within a container. -type linuxConsole struct { - master *os.File - slavePath string -} - -func (c *linuxConsole) Fd() uintptr { - return c.master.Fd() -} - -func (c *linuxConsole) Path() string { - return c.slavePath -} - -func (c *linuxConsole) Read(b []byte) (int, error) { - return c.master.Read(b) -} - -func (c *linuxConsole) Write(b []byte) (int, error) { - return c.master.Write(b) -} - -func (c *linuxConsole) Close() error { - if m := c.master; m != nil { - return m.Close() - } - return nil -} - -// mount initializes the console inside the rootfs mounting with the specified mount label -// and applying the correct ownership of the console. -func (c *linuxConsole) mount(rootfs, mountLabel string, uid, gid int) error { - oldMask := syscall.Umask(0000) - defer syscall.Umask(oldMask) - if err := label.SetFileLabel(c.slavePath, mountLabel); err != nil { - return err - } - dest := filepath.Join(rootfs, "/dev/console") - f, err := os.Create(dest) - if err != nil && !os.IsExist(err) { - return err - } - if f != nil { - f.Close() - } - return syscall.Mount(c.slavePath, dest, "bind", syscall.MS_BIND, "") -} - -// dupStdio opens the slavePath for the console and dups the fds to the current -// processes stdio, fd 0,1,2. 
-func (c *linuxConsole) dupStdio() error { - slave, err := c.open(syscall.O_RDWR) - if err != nil { - return err - } - fd := int(slave.Fd()) - for _, i := range []int{0, 1, 2} { - if err := syscall.Dup3(fd, i, 0); err != nil { - return err - } - } - return nil -} - -// open is a clone of os.OpenFile without the O_CLOEXEC used to open the pty slave. -func (c *linuxConsole) open(flag int) (*os.File, error) { - r, e := syscall.Open(c.slavePath, flag, 0) - if e != nil { - return nil, &os.PathError{ - Op: "open", - Path: c.slavePath, - Err: e, - } - } - return os.NewFile(uintptr(r), c.slavePath), nil -} - -func ioctl(fd uintptr, flag, data uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { - return err - } - return nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - var n int32 - if err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/docker/libcontainer/console_windows.go b/vendor/github.com/docker/libcontainer/console_windows.go deleted file mode 100644 index 80c7463b..00000000 --- a/vendor/github.com/docker/libcontainer/console_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package libcontainer - -// newConsole returns an initalized console that can be used within a container -func newConsole(uid, gid int) (Console, error) { - return &windowsConsole{}, nil -} - -// windowsConsole is a Windows psuedo TTY for use within a container. -type windowsConsole struct { -} - -func (c *windowsConsole) Fd() uintptr { - return 0 -} - -func (c *windowsConsole) Path() string { - return "" -} - -func (c *windowsConsole) Read(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Write(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Close() error { - return nil -} diff --git a/vendor/github.com/docker/libcontainer/container.go b/vendor/github.com/docker/libcontainer/container.go deleted file mode 100644 index 17f2f21b..00000000 --- a/vendor/github.com/docker/libcontainer/container.go +++ /dev/null @@ -1,162 +0,0 @@ -// Libcontainer provides a native Go implementation for creating containers -// with namespaces, cgroups, capabilities, and filesystem access controls. -// It allows you to manage the lifecycle of the container performing additional operations -// after the container is created. -package libcontainer - -import ( - "github.com/docker/libcontainer/configs" -) - -// The status of a container. -type Status int - -const ( - // The container exists and is running. - Running Status = iota + 1 - - // The container exists, it is in the process of being paused. - Pausing - - // The container exists, but all its processes are paused. - Paused - - // The container exists, but its state is saved on disk - Checkpointed - - // The container does not exist. - Destroyed -) - -// State represents a running container's state -type State struct { - // ID is the container ID. - ID string `json:"id"` - - // InitProcessPid is the init process id in the parent namespace. 
- InitProcessPid int `json:"init_process_pid"` - - // InitProcessStartTime is the init process start time. - InitProcessStartTime string `json:"init_process_start"` - - // Path to all the cgroups setup for a container. Key is cgroup subsystem name - // with the value as the path. - CgroupPaths map[string]string `json:"cgroup_paths"` - - // NamespacePaths are filepaths to the container's namespaces. Key is the namespace type - // with the value as the path. - NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"` - - // Config is the container's configuration. - Config configs.Config `json:"config"` - - // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore - ExternalDescriptors []string `json:"external_descriptors,omitempty"` -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. -type Container interface { - // Returns the ID of the container - ID() string - - // Returns the current status of the container. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Status() (Status, error) - - // State returns the current container's state information. - // - // errors: - // Systemerror - System error. - State() (*State, error) - - // Returns the current config of the container. - Config() configs.Config - - // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - // - // Some of the returned PIDs may no longer refer to processes in the Container, unless - // the Container state is PAUSED in which case every PID in the slice is valid. - Processes() ([]int, error) - - // Returns statistics for the container. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Stats() (*Stats, error) - - // Set cgroup resources of container as configured - // - // We can use this to change resources when containers are running. - // - // errors: - // Systemerror - System error. - Set(config configs.Config) error - - // Start a process inside the container. Returns error if process fails to - // start. You can track process lifecycle with passed Process structure. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // ConfigInvalid - config is invalid, - // ContainerPaused - Container is paused, - // Systemerror - System error. - Start(process *Process) (err error) - - // Checkpoint checkpoints the running container's state to disk using the criu(8) utility. - // - // errors: - // Systemerror - System error. - Checkpoint(criuOpts *CriuOpts) error - - // Restore restores the checkpointed container to a running state using the criu(8) utiity. - // - // errors: - // Systemerror - System error. - Restore(process *Process, criuOpts *CriuOpts) error - - // Destroys the container after killing all running processes. - // - // Any event registrations are removed before the container is destroyed. - // No error is returned if the container is already destroyed. - // - // errors: - // Systemerror - System error. - Destroy() error - - // If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses - // the execution of any user processes. 
Asynchronously, when the container finished being paused the - // state is changed to PAUSED. - // If the Container state is PAUSED, do nothing. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Pause() error - - // If the Container state is PAUSED, resumes the execution of any user processes in the - // Container before setting the Container state to RUNNING. - // If the Container state is RUNNING, do nothing. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Resume() error - - // NotifyOOM returns a read-only channel signaling when the container receives an OOM notification. - // - // errors: - // Systemerror - System error. - NotifyOOM() (<-chan struct{}, error) -} diff --git a/vendor/github.com/docker/libcontainer/container_linux.go b/vendor/github.com/docker/libcontainer/container_linux.go deleted file mode 100644 index 215f35d3..00000000 --- a/vendor/github.com/docker/libcontainer/container_linux.go +++ /dev/null @@ -1,779 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/configs" - "github.com/docker/libcontainer/criurpc" - "github.com/golang/protobuf/proto" -) - -const stdioFdCount = 3 - -type linuxContainer struct { - id string - root string - config *configs.Config - cgroupManager cgroups.Manager - initPath string - initArgs []string - initProcess parentProcess - criuPath string - m sync.Mutex -} - -// ID returns the container's unique ID -func (c *linuxContainer) ID() string { - return c.id -} - -// Config returns the container's configuration -func (c *linuxContainer) Config() configs.Config { - return *c.config -} - -func (c *linuxContainer) Status() (Status, error) { - c.m.Lock() - defer c.m.Unlock() - return c.currentStatus() -} - -func (c *linuxContainer) State() (*State, error) { - c.m.Lock() - defer c.m.Unlock() - return c.currentState() -} - -func (c *linuxContainer) Processes() ([]int, error) { - pids, err := c.cgroupManager.GetPids() - if err != nil { - return nil, newSystemError(err) - } - return pids, nil -} - -func (c *linuxContainer) Stats() (*Stats, error) { - var ( - err error - stats = &Stats{} - ) - if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { - return stats, newSystemError(err) - } - for _, iface := range c.config.Networks { - switch iface.Type { - case "veth": - istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) - if err != nil { - return stats, newSystemError(err) - } - stats.Interfaces = append(stats.Interfaces, istats) - } - } - return stats, nil -} - -func (c *linuxContainer) Set(config configs.Config) error { - c.m.Lock() - defer c.m.Unlock() - c.config = &config - return c.cgroupManager.Set(c.config) -} - -func (c *linuxContainer) Start(process *Process) error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - doInit := status == Destroyed - parent, err := c.newParentProcess(process, doInit) - if err != nil { - return newSystemError(err) - } - if err := parent.start(); err != nil { - // terminate the process to ensure that it properly is reaped. 
- if err := parent.terminate(); err != nil { - logrus.Warn(err) - } - return newSystemError(err) - } - process.ops = parent - if doInit { - c.updateState(parent) - } - return nil -} - -func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) { - parentPipe, childPipe, err := newPipe() - if err != nil { - return nil, newSystemError(err) - } - cmd, err := c.commandTemplate(p, childPipe) - if err != nil { - return nil, newSystemError(err) - } - if !doInit { - return c.newSetnsProcess(p, cmd, parentPipe, childPipe), nil - } - return c.newInitProcess(p, cmd, parentPipe, childPipe) -} - -func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) { - cmd := &exec.Cmd{ - Path: c.initPath, - Args: c.initArgs, - } - cmd.Stdin = p.Stdin - cmd.Stdout = p.Stdout - cmd.Stderr = p.Stderr - cmd.Dir = c.config.Rootfs - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.ExtraFiles = append(p.ExtraFiles, childPipe) - cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1)) - // NOTE: when running a container with no PID namespace and the parent process spawning the container is - // PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason - // even with the parent still running. - if c.config.ParentDeathSignal > 0 { - cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal) - } - return cmd, nil -} - -func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*initProcess, error) { - t := "_LIBCONTAINER_INITTYPE=standard" - cloneFlags := c.config.Namespaces.CloneFlags() - if cloneFlags&syscall.CLONE_NEWUSER != 0 { - if err := c.addUidGidMappings(cmd.SysProcAttr); err != nil { - // user mappings are not supported - return nil, err - } - // Default to root user when user namespaces are enabled. 
- if cmd.SysProcAttr.Credential == nil { - cmd.SysProcAttr.Credential = &syscall.Credential{} - } - } - cmd.Env = append(cmd.Env, t) - cmd.SysProcAttr.Cloneflags = cloneFlags - return &initProcess{ - cmd: cmd, - childPipe: childPipe, - parentPipe: parentPipe, - manager: c.cgroupManager, - config: c.newInitConfig(p), - }, nil -} - -func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) *setnsProcess { - cmd.Env = append(cmd.Env, - fmt.Sprintf("_LIBCONTAINER_INITPID=%d", c.initProcess.pid()), - "_LIBCONTAINER_INITTYPE=setns", - ) - if p.consolePath != "" { - cmd.Env = append(cmd.Env, "_LIBCONTAINER_CONSOLE_PATH="+p.consolePath) - } - // TODO: set on container for process management - return &setnsProcess{ - cmd: cmd, - cgroupPaths: c.cgroupManager.GetPaths(), - childPipe: childPipe, - parentPipe: parentPipe, - config: c.newInitConfig(p), - } -} - -func (c *linuxContainer) newInitConfig(process *Process) *initConfig { - return &initConfig{ - Config: c.config, - Args: process.Args, - Env: process.Env, - User: process.User, - Cwd: process.Cwd, - Console: process.consolePath, - Capabilities: process.Capabilities, - PassedFilesCount: len(process.ExtraFiles), - } -} - -func newPipe() (parent *os.File, child *os.File, err error) { - fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) - if err != nil { - return nil, nil, err - } - return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil -} - -func (c *linuxContainer) Destroy() error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - if status != Destroyed { - return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped) - } - if !c.config.Namespaces.Contains(configs.NEWPID) { - if err := killCgroupProcesses(c.cgroupManager); err != nil { - logrus.Warn(err) - } - } - err = c.cgroupManager.Destroy() - if rerr := os.RemoveAll(c.root); err == nil { - err = rerr - } - c.initProcess = nil - return err -} - -func (c *linuxContainer) Pause() error { - c.m.Lock() - defer c.m.Unlock() - return c.cgroupManager.Freeze(configs.Frozen) -} - -func (c *linuxContainer) Resume() error { - c.m.Lock() - defer c.m.Unlock() - return c.cgroupManager.Freeze(configs.Thawed) -} - -func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) { - return notifyOnOOM(c.cgroupManager.GetPaths()) -} - -// XXX debug support, remove when debugging done. 
-func addArgsFromEnv(evar string, args *[]string) { - if e := os.Getenv(evar); e != "" { - for _, f := range strings.Fields(e) { - *args = append(*args, f) - } - } - fmt.Printf(">>> criu %v\n", *args) -} - -func (c *linuxContainer) checkCriuVersion() error { - var x, y, z int - - out, err := exec.Command(c.criuPath, "-V").Output() - if err != nil { - return err - } - - n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2 - if err != nil { - n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6 - } - if n < 2 || err != nil { - return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err) - } - - if x*10000+y*100+z < 10502 { - return fmt.Errorf("CRIU version must be 1.5.2 or higher") - } - - return nil -} - -const descriptors_filename = "descriptors.json" - -func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { - c.m.Lock() - defer c.m.Unlock() - - if err := c.checkCriuVersion(); err != nil { - return err - } - - if criuOpts.ImagesDirectory == "" { - criuOpts.ImagesDirectory = filepath.Join(c.root, "criu.image") - } - - // Since a container can be C/R'ed multiple times, - // the checkpoint directory may already exist. - if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) { - return err - } - - if criuOpts.WorkDirectory == "" { - criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") - } - - if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) { - return err - } - - workDir, err := os.Open(criuOpts.WorkDirectory) - if err != nil { - return err - } - defer workDir.Close() - - imageDir, err := os.Open(criuOpts.ImagesDirectory) - if err != nil { - return err - } - defer imageDir.Close() - - rpcOpts := criurpc.CriuOpts{ - ImagesDirFd: proto.Int32(int32(imageDir.Fd())), - WorkDirFd: proto.Int32(int32(workDir.Fd())), - LogLevel: proto.Int32(4), - LogFile: proto.String("dump.log"), - Root: proto.String(c.config.Rootfs), - ManageCgroups: proto.Bool(true), - NotifyScripts: proto.Bool(true), - Pid: proto.Int32(int32(c.initProcess.pid())), - ShellJob: proto.Bool(criuOpts.ShellJob), - LeaveRunning: proto.Bool(criuOpts.LeaveRunning), - TcpEstablished: proto.Bool(criuOpts.TcpEstablished), - ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), - } - - // append optional criu opts, e.g., page-server and port - if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 { - rpcOpts.Ps = &criurpc.CriuPageServerInfo{ - Address: proto.String(criuOpts.PageServer.Address), - Port: proto.Int32(criuOpts.PageServer.Port), - } - } - - t := criurpc.CriuReqType_DUMP - req := criurpc.CriuReq{ - Type: &t, - Opts: &rpcOpts, - } - - for _, m := range c.config.Mounts { - if m.Device == "bind" { - mountDest := m.Destination - if strings.HasPrefix(mountDest, c.config.Rootfs) { - mountDest = mountDest[len(c.config.Rootfs):] - } - - extMnt := new(criurpc.ExtMountMap) - extMnt.Key = proto.String(mountDest) - extMnt.Val = proto.String(mountDest) - req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) - } - } - - // Write the FD info to a file in the image directory - - fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors()) - if err != nil { - return err - } - - err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptors_filename), fdsJSON, 0655) - if err != nil { - return err - } - - err = c.criuSwrk(nil, &req, criuOpts) - if err != nil { - return err - } - return nil -} - -func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { - c.m.Lock() - 
defer c.m.Unlock() - - if err := c.checkCriuVersion(); err != nil { - return err - } - - if criuOpts.WorkDirectory == "" { - criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") - } - // Since a container can be C/R'ed multiple times, - // the work directory may already exist. - if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) { - return err - } - - workDir, err := os.Open(criuOpts.WorkDirectory) - if err != nil { - return err - } - defer workDir.Close() - - if criuOpts.ImagesDirectory == "" { - criuOpts.ImagesDirectory = filepath.Join(c.root, "criu.image") - } - imageDir, err := os.Open(criuOpts.ImagesDirectory) - if err != nil { - return err - } - defer imageDir.Close() - - // CRIU has a few requirements for a root directory: - // * it must be a mount point - // * its parent must not be overmounted - // c.config.Rootfs is bind-mounted to a temporary directory - // to satisfy these requirements. - root := filepath.Join(c.root, "criu-root") - if err := os.Mkdir(root, 0755); err != nil { - return err - } - defer os.Remove(root) - - root, err = filepath.EvalSymlinks(root) - if err != nil { - return err - } - - err = syscall.Mount(c.config.Rootfs, root, "", syscall.MS_BIND|syscall.MS_REC, "") - if err != nil { - return err - } - defer syscall.Unmount(root, syscall.MNT_DETACH) - - t := criurpc.CriuReqType_RESTORE - req := criurpc.CriuReq{ - Type: &t, - Opts: &criurpc.CriuOpts{ - ImagesDirFd: proto.Int32(int32(imageDir.Fd())), - WorkDirFd: proto.Int32(int32(workDir.Fd())), - EvasiveDevices: proto.Bool(true), - LogLevel: proto.Int32(4), - LogFile: proto.String("restore.log"), - RstSibling: proto.Bool(true), - Root: proto.String(root), - ManageCgroups: proto.Bool(true), - NotifyScripts: proto.Bool(true), - ShellJob: proto.Bool(criuOpts.ShellJob), - ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), - TcpEstablished: proto.Bool(criuOpts.TcpEstablished), - }, - } - for _, m := range c.config.Mounts { - if m.Device == "bind" { - mountDest := m.Destination - if strings.HasPrefix(mountDest, c.config.Rootfs) { - mountDest = mountDest[len(c.config.Rootfs):] - } - - extMnt := new(criurpc.ExtMountMap) - extMnt.Key = proto.String(mountDest) - extMnt.Val = proto.String(m.Source) - req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) - } - } - for _, iface := range c.config.Networks { - switch iface.Type { - case "veth": - veth := new(criurpc.CriuVethPair) - veth.IfOut = proto.String(iface.HostInterfaceName) - veth.IfIn = proto.String(iface.Name) - req.Opts.Veths = append(req.Opts.Veths, veth) - break - case "loopback": - break - } - } - - var ( - fds []string - fdJSON []byte - ) - - if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptors_filename)); err != nil { - return err - } - - if err = json.Unmarshal(fdJSON, &fds); err != nil { - return err - } - - for i := range fds { - if s := fds[i]; strings.Contains(s, "pipe:") { - inheritFd := new(criurpc.InheritFd) - inheritFd.Key = proto.String(s) - inheritFd.Fd = proto.Int32(int32(i)) - req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd) - } - } - - err = c.criuSwrk(process, &req, criuOpts) - if err != nil { - return err - } - return nil -} - -func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts) error { - fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0) - if err != nil { - return err - } - - criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client") - criuServer := os.NewFile(uintptr(fds[1]), 
"criu-transport-server") - defer criuClient.Close() - defer criuServer.Close() - - args := []string{"swrk", "3"} - cmd := exec.Command(c.criuPath, args...) - if process != nil { - cmd.Stdin = process.Stdin - cmd.Stdout = process.Stdout - cmd.Stderr = process.Stderr - } - cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer) - - if err := cmd.Start(); err != nil { - return err - } - criuServer.Close() - - defer func() { - criuClient.Close() - _, err := cmd.Process.Wait() - if err != nil { - return - } - }() - - var extFds []string - if process != nil { - extFds, err = getPipeFds(cmd.Process.Pid) - if err != nil { - return err - } - } - - data, err := proto.Marshal(req) - if err != nil { - return err - } - _, err = criuClient.Write(data) - if err != nil { - return err - } - - buf := make([]byte, 10*4096) - for true { - n, err := criuClient.Read(buf) - if err != nil { - return err - } - if n == 0 { - return fmt.Errorf("unexpected EOF") - } - if n == len(buf) { - return fmt.Errorf("buffer is too small") - } - - resp := new(criurpc.CriuResp) - err = proto.Unmarshal(buf[:n], resp) - if err != nil { - return err - } - if !resp.GetSuccess() { - return fmt.Errorf("criu failed: type %s errno %d", req.GetType().String(), resp.GetCrErrno()) - } - - t := resp.GetType() - switch { - case t == criurpc.CriuReqType_NOTIFY: - if err := c.criuNotifications(resp, process, opts, extFds); err != nil { - return err - } - t = criurpc.CriuReqType_NOTIFY - req = &criurpc.CriuReq{ - Type: &t, - NotifySuccess: proto.Bool(true), - } - data, err = proto.Marshal(req) - if err != nil { - return err - } - n, err = criuClient.Write(data) - if err != nil { - return err - } - continue - case t == criurpc.CriuReqType_RESTORE: - case t == criurpc.CriuReqType_DUMP: - break - default: - return fmt.Errorf("unable to parse the response %s", resp.String()) - } - - break - } - - // cmd.Wait() waits cmd.goroutines which are used for proxying file descriptors. - // Here we want to wait only the CRIU process. 
- st, err := cmd.Process.Wait() - if err != nil { - return err - } - if !st.Success() { - return fmt.Errorf("criu failed: %s", st.String()) - } - return nil -} - -// block any external network activity -func lockNetwork(config *configs.Config) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - - if err := strategy.detach(config); err != nil { - return err - } - } - return nil -} - -func unlockNetwork(config *configs.Config) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - if err = strategy.attach(config); err != nil { - return err - } - } - return nil -} - -func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string) error { - notify := resp.GetNotify() - if notify == nil { - return fmt.Errorf("invalid response: %s", resp.String()) - } - - switch { - case notify.GetScript() == "post-dump": - if !opts.LeaveRunning { - f, err := os.Create(filepath.Join(c.root, "checkpoint")) - if err != nil { - return err - } - f.Close() - } - break - - case notify.GetScript() == "network-unlock": - if err := unlockNetwork(c.config); err != nil { - return err - } - break - - case notify.GetScript() == "network-lock": - if err := lockNetwork(c.config); err != nil { - return err - } - break - - case notify.GetScript() == "post-restore": - pid := notify.GetPid() - r, err := newRestoredProcess(int(pid), fds) - if err != nil { - return err - } - - // TODO: crosbymichael restore previous process information by saving the init process information in - // the container's state file or separate process state files. - if err := c.updateState(r); err != nil { - return err - } - process.ops = r - break - } - - return nil -} - -func (c *linuxContainer) updateState(process parentProcess) error { - c.initProcess = process - state, err := c.currentState() - if err != nil { - return err - } - f, err := os.Create(filepath.Join(c.root, stateFilename)) - if err != nil { - return err - } - defer f.Close() - os.Remove(filepath.Join(c.root, "checkpoint")) - return json.NewEncoder(f).Encode(state) -} - -func (c *linuxContainer) currentStatus() (Status, error) { - if _, err := os.Stat(filepath.Join(c.root, "checkpoint")); err == nil { - return Checkpointed, nil - } - if c.initProcess == nil { - return Destroyed, nil - } - // return Running if the init process is alive - if err := syscall.Kill(c.initProcess.pid(), 0); err != nil { - if err == syscall.ESRCH { - return Destroyed, nil - } - return 0, newSystemError(err) - } - if c.config.Cgroups != nil && c.config.Cgroups.Freezer == configs.Frozen { - return Paused, nil - } - return Running, nil -} - -func (c *linuxContainer) currentState() (*State, error) { - status, err := c.currentStatus() - if err != nil { - return nil, err - } - if status == Destroyed { - return nil, newGenericError(fmt.Errorf("container destroyed"), ContainerNotExists) - } - startTime, err := c.initProcess.startTime() - if err != nil { - return nil, newSystemError(err) - } - state := &State{ - ID: c.ID(), - Config: *c.config, - InitProcessPid: c.initProcess.pid(), - InitProcessStartTime: startTime, - CgroupPaths: c.cgroupManager.GetPaths(), - NamespacePaths: make(map[configs.NamespaceType]string), - ExternalDescriptors: c.initProcess.externalDescriptors(), - } - for _, ns := range c.config.Namespaces { - state.NamespacePaths[ns.Type] = ns.GetPath(c.initProcess.pid()) - } - for _, nsType := range 
configs.NamespaceTypes() { - if _, ok := state.NamespacePaths[nsType]; !ok { - ns := configs.Namespace{Type: nsType} - state.NamespacePaths[ns.Type] = ns.GetPath(c.initProcess.pid()) - } - } - return state, nil -} diff --git a/vendor/github.com/docker/libcontainer/container_linux_test.go b/vendor/github.com/docker/libcontainer/container_linux_test.go deleted file mode 100644 index a70a102e..00000000 --- a/vendor/github.com/docker/libcontainer/container_linux_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - "testing" - - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/configs" -) - -type mockCgroupManager struct { - pids []int - stats *cgroups.Stats - paths map[string]string -} - -func (m *mockCgroupManager) GetPids() ([]int, error) { - return m.pids, nil -} - -func (m *mockCgroupManager) GetStats() (*cgroups.Stats, error) { - return m.stats, nil -} - -func (m *mockCgroupManager) Apply(pid int) error { - return nil -} - -func (m *mockCgroupManager) Set(container *configs.Config) error { - return nil -} - -func (m *mockCgroupManager) Destroy() error { - return nil -} - -func (m *mockCgroupManager) GetPaths() map[string]string { - return m.paths -} - -func (m *mockCgroupManager) Freeze(state configs.FreezerState) error { - return nil -} - -type mockProcess struct { - _pid int - started string -} - -func (m *mockProcess) terminate() error { - return nil -} - -func (m *mockProcess) pid() int { - return m._pid -} - -func (m *mockProcess) startTime() (string, error) { - return m.started, nil -} - -func (m *mockProcess) start() error { - return nil -} - -func (m *mockProcess) wait() (*os.ProcessState, error) { - return nil, nil -} - -func (m *mockProcess) signal(_ os.Signal) error { - return nil -} - -func (p *mockProcess) externalDescriptors() []string { - return []string{} -} - -func (p *mockProcess) setExternalDescriptors(newFds []string) { -} - -func TestGetContainerPids(t *testing.T) { - container := &linuxContainer{ - id: "myid", - config: &configs.Config{}, - cgroupManager: &mockCgroupManager{pids: []int{1, 2, 3}}, - } - pids, err := container.Processes() - if err != nil { - t.Fatal(err) - } - for i, expected := range []int{1, 2, 3} { - if pids[i] != expected { - t.Fatalf("expected pid %d but received %d", expected, pids[i]) - } - } -} - -func TestGetContainerStats(t *testing.T) { - container := &linuxContainer{ - id: "myid", - config: &configs.Config{}, - cgroupManager: &mockCgroupManager{ - pids: []int{1, 2, 3}, - stats: &cgroups.Stats{ - MemoryStats: cgroups.MemoryStats{ - Usage: cgroups.MemoryData{ - Usage: 1024, - }, - }, - }, - }, - } - stats, err := container.Stats() - if err != nil { - t.Fatal(err) - } - if stats.CgroupStats == nil { - t.Fatal("cgroup stats are nil") - } - if stats.CgroupStats.MemoryStats.Usage.Usage != 1024 { - t.Fatalf("expected memory usage 1024 but recevied %d", stats.CgroupStats.MemoryStats.Usage.Usage) - } -} - -func TestGetContainerState(t *testing.T) { - var ( - pid = os.Getpid() - expectedMemoryPath = "/sys/fs/cgroup/memory/myid" - expectedNetworkPath = "/networks/fd" - ) - container := &linuxContainer{ - id: "myid", - config: &configs.Config{ - Namespaces: []configs.Namespace{ - {Type: configs.NEWPID}, - {Type: configs.NEWNS}, - {Type: configs.NEWNET, Path: expectedNetworkPath}, - {Type: configs.NEWUTS}, - // emulate host for IPC - //{Type: configs.NEWIPC}, - }, - }, - initProcess: &mockProcess{ - _pid: pid, - started: "010", - }, - cgroupManager: &mockCgroupManager{ - 
pids: []int{1, 2, 3}, - stats: &cgroups.Stats{ - MemoryStats: cgroups.MemoryStats{ - Usage: cgroups.MemoryData{ - Usage: 1024, - }, - }, - }, - paths: map[string]string{ - "memory": expectedMemoryPath, - }, - }, - } - state, err := container.State() - if err != nil { - t.Fatal(err) - } - if state.InitProcessPid != pid { - t.Fatalf("expected pid %d but received %d", pid, state.InitProcessPid) - } - if state.InitProcessStartTime != "010" { - t.Fatalf("expected process start time 010 but received %s", state.InitProcessStartTime) - } - paths := state.CgroupPaths - if paths == nil { - t.Fatal("cgroup paths should not be nil") - } - if memPath := paths["memory"]; memPath != expectedMemoryPath { - t.Fatalf("expected memory path %q but received %q", expectedMemoryPath, memPath) - } - for _, ns := range container.config.Namespaces { - path := state.NamespacePaths[ns.Type] - if path == "" { - t.Fatalf("expected non nil namespace path for %s", ns.Type) - } - if ns.Type == configs.NEWNET { - if path != expectedNetworkPath { - t.Fatalf("expected path %q but received %q", expectedNetworkPath, path) - } - } else { - file := "" - switch ns.Type { - case configs.NEWNET: - file = "net" - case configs.NEWNS: - file = "mnt" - case configs.NEWPID: - file = "pid" - case configs.NEWIPC: - file = "ipc" - case configs.NEWUSER: - file = "user" - case configs.NEWUTS: - file = "uts" - } - expected := fmt.Sprintf("/proc/%d/ns/%s", pid, file) - if expected != path { - t.Fatalf("expected path %q but received %q", expected, path) - } - } - } -} diff --git a/vendor/github.com/docker/libcontainer/container_nouserns_linux.go b/vendor/github.com/docker/libcontainer/container_nouserns_linux.go deleted file mode 100644 index 3b75d593..00000000 --- a/vendor/github.com/docker/libcontainer/container_nouserns_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.4 - -package libcontainer - -import ( - "fmt" - "syscall" -) - -// not available before go 1.4 -func (c *linuxContainer) addUidGidMappings(sys *syscall.SysProcAttr) error { - return fmt.Errorf("User namespace is not supported in golang < 1.4") -} diff --git a/vendor/github.com/docker/libcontainer/container_userns_linux.go b/vendor/github.com/docker/libcontainer/container_userns_linux.go deleted file mode 100644 index 5f4cf3c9..00000000 --- a/vendor/github.com/docker/libcontainer/container_userns_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build go1.4 - -package libcontainer - -import "syscall" - -// Converts IDMap to SysProcIDMap array and adds it to SysProcAttr. 
-func (c *linuxContainer) addUidGidMappings(sys *syscall.SysProcAttr) error { - if c.config.UidMappings != nil { - sys.UidMappings = make([]syscall.SysProcIDMap, len(c.config.UidMappings)) - for i, um := range c.config.UidMappings { - sys.UidMappings[i].ContainerID = um.ContainerID - sys.UidMappings[i].HostID = um.HostID - sys.UidMappings[i].Size = um.Size - } - } - if c.config.GidMappings != nil { - sys.GidMappings = make([]syscall.SysProcIDMap, len(c.config.GidMappings)) - for i, gm := range c.config.GidMappings { - sys.GidMappings[i].ContainerID = gm.ContainerID - sys.GidMappings[i].HostID = gm.HostID - sys.GidMappings[i].Size = gm.Size - } - } - return nil -} diff --git a/vendor/github.com/docker/libcontainer/criu_opts.go b/vendor/github.com/docker/libcontainer/criu_opts.go deleted file mode 100644 index 9e9563e7..00000000 --- a/vendor/github.com/docker/libcontainer/criu_opts.go +++ /dev/null @@ -1,16 +0,0 @@ -package libcontainer - -type CriuPageServerInfo struct { - Address string // IP address of CRIU page server - Port int32 // port number of CRIU page server -} - -type CriuOpts struct { - ImagesDirectory string // directory for storing image files - WorkDirectory string // directory to cd and write logs/pidfiles/stats to - LeaveRunning bool // leave container in running state after checkpoint - TcpEstablished bool // checkpoint/restore established TCP connections - ExternalUnixConnections bool // allow external unix connections - ShellJob bool // allow to dump and restore shell jobs - PageServer CriuPageServerInfo // allow to dump to criu page server -} diff --git a/vendor/github.com/docker/libcontainer/error.go b/vendor/github.com/docker/libcontainer/error.go deleted file mode 100644 index 6c266620..00000000 --- a/vendor/github.com/docker/libcontainer/error.go +++ /dev/null @@ -1,62 +0,0 @@ -package libcontainer - -import "io" - -// API error code type. -type ErrorCode int - -// API error codes. -const ( - // Factory errors - IdInUse ErrorCode = iota - InvalidIdFormat - - // Container errors - ContainerNotExists - ContainerPaused - ContainerNotStopped - ContainerNotRunning - - // Process errors - ProcessNotExecuted - - // Common errors - ConfigInvalid - SystemError -) - -func (c ErrorCode) String() string { - switch c { - case IdInUse: - return "Id already in use" - case InvalidIdFormat: - return "Invalid format" - case ContainerPaused: - return "Container paused" - case ConfigInvalid: - return "Invalid configuration" - case SystemError: - return "System error" - case ContainerNotExists: - return "Container does not exist" - case ContainerNotStopped: - return "Container is not stopped" - case ContainerNotRunning: - return "Container is not running" - default: - return "Unknown error" - } -} - -// API Error type. -type Error interface { - error - - // Returns a verbose string including the error message - // and a representation of the stack trace suitable for - // printing. - Detail(w io.Writer) error - - // Returns the error code for this error. 
- Code() ErrorCode -} diff --git a/vendor/github.com/docker/libcontainer/error_test.go b/vendor/github.com/docker/libcontainer/error_test.go deleted file mode 100644 index 4bf4c9f5..00000000 --- a/vendor/github.com/docker/libcontainer/error_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -import "testing" - -func TestErrorCode(t *testing.T) { - codes := map[ErrorCode]string{ - IdInUse: "Id already in use", - InvalidIdFormat: "Invalid format", - ContainerPaused: "Container paused", - ConfigInvalid: "Invalid configuration", - SystemError: "System error", - ContainerNotExists: "Container does not exist", - } - - for code, expected := range codes { - if actual := code.String(); actual != expected { - t.Fatalf("expected string %q but received %q", expected, actual) - } - } -} diff --git a/vendor/github.com/docker/libcontainer/factory.go b/vendor/github.com/docker/libcontainer/factory.go deleted file mode 100644 index 2b3ff85d..00000000 --- a/vendor/github.com/docker/libcontainer/factory.go +++ /dev/null @@ -1,45 +0,0 @@ -package libcontainer - -import ( - "github.com/docker/libcontainer/configs" -) - -type Factory interface { - // Creates a new container with the given id and starts the initial process inside it. - // id must be a string containing only letters, digits and underscores and must contain - // between 1 and 1024 characters, inclusive. - // - // The id must not already be in use by an existing container. Containers created using - // a factory with the same path (and file system) must have distinct ids. - // - // Returns the new container with a running process. - // - // errors: - // IdInUse - id is already in use by a container - // InvalidIdFormat - id has incorrect format - // ConfigInvalid - config is invalid - // Systemerror - System error - // - // On error, any partially created container parts are cleaned up (the operation is atomic). - Create(id string, config *configs.Config) (Container, error) - - // Load takes an ID for an existing container and returns the container information - // from the state. This presents a read only view of the container. - // - // errors: - // Path does not exist - // Container is stopped - // System error - Load(id string) (Container, error) - - // StartInitialization is an internal API to libcontainer used during the reexec of the - // container. - // - // Errors: - // Pipe connection error - // System error - StartInitialization() error - - // Type returns info string about factory type (e.g. lxc, libcontainer...) - Type() string -} diff --git a/vendor/github.com/docker/libcontainer/factory_linux.go b/vendor/github.com/docker/libcontainer/factory_linux.go deleted file mode 100644 index 26b351ac..00000000 --- a/vendor/github.com/docker/libcontainer/factory_linux.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "syscall" - - "github.com/docker/docker/pkg/mount" - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/cgroups/fs" - "github.com/docker/libcontainer/cgroups/systemd" - "github.com/docker/libcontainer/configs" - "github.com/docker/libcontainer/configs/validate" -) - -const ( - stateFilename = "state.json" -) - -var ( - idRegex = regexp.MustCompile(`^[\w_]+$`) - maxIdLen = 1024 -) - -// InitArgs returns an options func to configure a LinuxFactory with the -// provided init arguments. 
-func InitArgs(args ...string) func(*LinuxFactory) error { - return func(l *LinuxFactory) error { - name := args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - name = lp - } - } - l.InitPath = name - l.InitArgs = append([]string{name}, args[1:]...) - return nil - } -} - -// InitPath returns an options func to configure a LinuxFactory with the -// provided absolute path to the init binary and arguements. -func InitPath(path string, args ...string) func(*LinuxFactory) error { - return func(l *LinuxFactory) error { - l.InitPath = path - l.InitArgs = args - return nil - } -} - -// SystemdCgroups is an options func to configure a LinuxFactory to return -// containers that use systemd to create and manage cgroups. -func SystemdCgroups(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &systemd.Manager{ - Cgroups: config, - Paths: paths, - } - } - return nil -} - -// Cgroupfs is an options func to configure a LinuxFactory to return -// containers that use the native cgroups filesystem implementation to -// create and manage cgroups. -func Cgroupfs(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &fs.Manager{ - Cgroups: config, - Paths: paths, - } - } - return nil -} - -// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs. -func TmpfsRoot(l *LinuxFactory) error { - mounted, err := mount.Mounted(l.Root) - if err != nil { - return err - } - if !mounted { - if err := syscall.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil { - return err - } - } - return nil -} - -// New returns a linux based container factory based in the root directory and -// configures the factory with the provided option funcs. -func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { - if root != "" { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, newGenericError(err, SystemError) - } - } - l := &LinuxFactory{ - Root: root, - Validator: validate.New(), - CriuPath: "criu", - } - InitArgs(os.Args[0], "init")(l) - Cgroupfs(l) - for _, opt := range options { - if err := opt(l); err != nil { - return nil, err - } - } - return l, nil -} - -// LinuxFactory implements the default factory interface for linux based systems. -type LinuxFactory struct { - // Root directory for the factory to store state. - Root string - - // InitPath is the absolute path to the init binary. - InitPath string - - // InitArgs are arguments for calling the init responsibilities for spawning - // a container. - InitArgs []string - - // CriuPath is the path to the criu binary used for checkpoint and restore of - // containers. - CriuPath string - - // Validator provides validation to container configurations. - Validator validate.Validator - - // NewCgroupsManager returns an initialized cgroups manager for a single container. 
- NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager -} - -func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) { - if l.Root == "" { - return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) - } - if err := l.validateID(id); err != nil { - return nil, err - } - if err := l.Validator.Validate(config); err != nil { - return nil, newGenericError(err, ConfigInvalid) - } - containerRoot := filepath.Join(l.Root, id) - if _, err := os.Stat(containerRoot); err == nil { - return nil, newGenericError(fmt.Errorf("Container with id exists: %v", id), IdInUse) - } else if !os.IsNotExist(err) { - return nil, newGenericError(err, SystemError) - } - if err := os.MkdirAll(containerRoot, 0700); err != nil { - return nil, newGenericError(err, SystemError) - } - return &linuxContainer{ - id: id, - root: containerRoot, - config: config, - initPath: l.InitPath, - initArgs: l.InitArgs, - criuPath: l.CriuPath, - cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), - }, nil -} - -func (l *LinuxFactory) Load(id string) (Container, error) { - if l.Root == "" { - return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) - } - containerRoot := filepath.Join(l.Root, id) - state, err := l.loadState(containerRoot) - if err != nil { - return nil, err - } - r := &nonChildProcess{ - processPid: state.InitProcessPid, - processStartTime: state.InitProcessStartTime, - fds: state.ExternalDescriptors, - } - return &linuxContainer{ - initProcess: r, - id: id, - config: &state.Config, - initPath: l.InitPath, - initArgs: l.InitArgs, - criuPath: l.CriuPath, - cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), - root: containerRoot, - }, nil -} - -func (l *LinuxFactory) Type() string { - return "libcontainer" -} - -// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state -// This is a low level implementation detail of the reexec and should not be consumed externally -func (l *LinuxFactory) StartInitialization() (err error) { - pipefd, err := strconv.Atoi(os.Getenv("_LIBCONTAINER_INITPIPE")) - if err != nil { - return err - } - var ( - pipe = os.NewFile(uintptr(pipefd), "pipe") - it = initType(os.Getenv("_LIBCONTAINER_INITTYPE")) - ) - // clear the current process's environment to clean any libcontainer - // specific env vars. - os.Clearenv() - defer func() { - // if we have an error during the initialization of the container's init then send it back to the - // parent process in the form of an initError. - if err != nil { - // ensure that any data sent from the parent is consumed so it doesn't - // receive ECONNRESET when the child writes to the pipe. 
- ioutil.ReadAll(pipe) - if err := json.NewEncoder(pipe).Encode(newSystemError(err)); err != nil { - panic(err) - } - } - // ensure that this pipe is always closed - pipe.Close() - }() - i, err := newContainerInit(it, pipe) - if err != nil { - return err - } - return i.Init() -} - -func (l *LinuxFactory) loadState(root string) (*State, error) { - f, err := os.Open(filepath.Join(root, stateFilename)) - if err != nil { - if os.IsNotExist(err) { - return nil, newGenericError(err, ContainerNotExists) - } - return nil, newGenericError(err, SystemError) - } - defer f.Close() - var state *State - if err := json.NewDecoder(f).Decode(&state); err != nil { - return nil, newGenericError(err, SystemError) - } - return state, nil -} - -func (l *LinuxFactory) validateID(id string) error { - if !idRegex.MatchString(id) { - return newGenericError(fmt.Errorf("Invalid id format: %v", id), InvalidIdFormat) - } - if len(id) > maxIdLen { - return newGenericError(fmt.Errorf("Invalid id format: %v", id), InvalidIdFormat) - } - return nil -} diff --git a/vendor/github.com/docker/libcontainer/factory_linux_test.go b/vendor/github.com/docker/libcontainer/factory_linux_test.go deleted file mode 100644 index 00e39739..00000000 --- a/vendor/github.com/docker/libcontainer/factory_linux_test.go +++ /dev/null @@ -1,179 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/docker/pkg/mount" - "github.com/docker/libcontainer/configs" -) - -func newTestRoot() (string, error) { - dir, err := ioutil.TempDir("", "libcontainer") - if err != nil { - return "", err - } - return dir, nil -} - -func TestFactoryNew(t *testing.T) { - root, rerr := newTestRoot() - if rerr != nil { - t.Fatal(rerr) - } - defer os.RemoveAll(root) - factory, err := New(root, Cgroupfs) - if err != nil { - t.Fatal(err) - } - if factory == nil { - t.Fatal("factory should not be nil") - } - lfactory, ok := factory.(*LinuxFactory) - if !ok { - t.Fatal("expected linux factory returned on linux based systems") - } - if lfactory.Root != root { - t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root) - } - - if factory.Type() != "libcontainer" { - t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer") - } -} - -func TestFactoryNewTmpfs(t *testing.T) { - root, rerr := newTestRoot() - if rerr != nil { - t.Fatal(rerr) - } - defer os.RemoveAll(root) - factory, err := New(root, Cgroupfs, TmpfsRoot) - if err != nil { - t.Fatal(err) - } - if factory == nil { - t.Fatal("factory should not be nil") - } - lfactory, ok := factory.(*LinuxFactory) - if !ok { - t.Fatal("expected linux factory returned on linux based systems") - } - if lfactory.Root != root { - t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root) - } - - if factory.Type() != "libcontainer" { - t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer") - } - mounted, err := mount.Mounted(lfactory.Root) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatalf("Factory Root is not mounted") - } - mounts, err := mount.GetMounts() - if err != nil { - t.Fatal(err) - } - var found bool - for _, m := range mounts { - if m.Mountpoint == lfactory.Root { - if m.Fstype != "tmpfs" { - t.Fatalf("Fstype of root: %s, expected %s", m.Fstype, "tmpfs") - } - if m.Source != "tmpfs" { - t.Fatalf("Source of root: %s, expected %s", m.Source, "tmpfs") - } - found = true - } - } - if !found { - t.Fatalf("Factory Root is not 
listed in mounts list") - } -} - -func TestFactoryLoadNotExists(t *testing.T) { - root, rerr := newTestRoot() - if rerr != nil { - t.Fatal(rerr) - } - defer os.RemoveAll(root) - factory, err := New(root, Cgroupfs) - if err != nil { - t.Fatal(err) - } - _, err = factory.Load("nocontainer") - if err == nil { - t.Fatal("expected nil error loading non-existing container") - } - lerr, ok := err.(Error) - if !ok { - t.Fatal("expected libcontainer error type") - } - if lerr.Code() != ContainerNotExists { - t.Fatalf("expected error code %s but received %s", ContainerNotExists, lerr.Code()) - } -} - -func TestFactoryLoadContainer(t *testing.T) { - root, err := newTestRoot() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - // setup default container config and state for mocking - var ( - id = "1" - expectedConfig = &configs.Config{ - Rootfs: "/mycontainer/root", - } - expectedState = &State{ - InitProcessPid: 1024, - Config: *expectedConfig, - } - ) - if err := os.Mkdir(filepath.Join(root, id), 0700); err != nil { - t.Fatal(err) - } - if err := marshal(filepath.Join(root, id, stateFilename), expectedState); err != nil { - t.Fatal(err) - } - factory, err := New(root, Cgroupfs) - if err != nil { - t.Fatal(err) - } - container, err := factory.Load(id) - if err != nil { - t.Fatal(err) - } - if container.ID() != id { - t.Fatalf("expected container id %q but received %q", id, container.ID()) - } - config := container.Config() - if config.Rootfs != expectedConfig.Rootfs { - t.Fatalf("expected rootfs %q but received %q", expectedConfig.Rootfs, config.Rootfs) - } - lcontainer, ok := container.(*linuxContainer) - if !ok { - t.Fatal("expected linux container on linux based systems") - } - if lcontainer.initProcess.pid() != expectedState.InitProcessPid { - t.Fatalf("expected init pid %d but received %d", expectedState.InitProcessPid, lcontainer.initProcess.pid()) - } -} - -func marshal(path string, v interface{}) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - return json.NewEncoder(f).Encode(v) -} diff --git a/vendor/github.com/docker/libcontainer/generic_error.go b/vendor/github.com/docker/libcontainer/generic_error.go deleted file mode 100644 index ff4d7248..00000000 --- a/vendor/github.com/docker/libcontainer/generic_error.go +++ /dev/null @@ -1,74 +0,0 @@ -package libcontainer - -import ( - "fmt" - "io" - "text/template" - "time" - - "github.com/docker/libcontainer/stacktrace" -) - -var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}} -Code: {{.ECode}} -{{if .Message }} -Message: {{.Message}} -{{end}} -Frames:{{range $i, $frame := .Stack.Frames}} ---- -{{$i}}: {{$frame.Function}} -Package: {{$frame.Package}} -File: {{$frame.File}}@{{$frame.Line}}{{end}} -`)) - -func newGenericError(err error, c ErrorCode) Error { - if le, ok := err.(Error); ok { - return le - } - gerr := &genericError{ - Timestamp: time.Now(), - Err: err, - ECode: c, - Stack: stacktrace.Capture(1), - } - if err != nil { - gerr.Message = err.Error() - } - return gerr -} - -func newSystemError(err error) Error { - if le, ok := err.(Error); ok { - return le - } - gerr := &genericError{ - Timestamp: time.Now(), - Err: err, - ECode: SystemError, - Stack: stacktrace.Capture(1), - } - if err != nil { - gerr.Message = err.Error() - } - return gerr -} - -type genericError struct { - Timestamp time.Time - ECode ErrorCode - Err error `json:"-"` - Message string - Stack stacktrace.Stacktrace -} - -func (e *genericError) Error() string { - return 
fmt.Sprintf("[%d] %s: %s", e.ECode, e.ECode, e.Message) -} - -func (e *genericError) Code() ErrorCode { - return e.ECode -} - -func (e *genericError) Detail(w io.Writer) error { - return errorTemplate.Execute(w, e) -} diff --git a/vendor/github.com/docker/libcontainer/generic_error_test.go b/vendor/github.com/docker/libcontainer/generic_error_test.go deleted file mode 100644 index 292d2a36..00000000 --- a/vendor/github.com/docker/libcontainer/generic_error_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package libcontainer - -import ( - "fmt" - "io/ioutil" - "testing" -) - -func TestErrorDetail(t *testing.T) { - err := newGenericError(fmt.Errorf("test error"), SystemError) - if derr := err.Detail(ioutil.Discard); derr != nil { - t.Fatal(derr) - } -} diff --git a/vendor/github.com/docker/libcontainer/init_linux.go b/vendor/github.com/docker/libcontainer/init_linux.go deleted file mode 100644 index f36e354f..00000000 --- a/vendor/github.com/docker/libcontainer/init_linux.go +++ /dev/null @@ -1,330 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "fmt" - "os" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/configs" - "github.com/docker/libcontainer/netlink" - "github.com/docker/libcontainer/seccomp" - "github.com/docker/libcontainer/system" - "github.com/docker/libcontainer/user" - "github.com/docker/libcontainer/utils" -) - -type initType string - -const ( - initSetns initType = "setns" - initStandard initType = "standard" -) - -type pid struct { - Pid int `json:"pid"` -} - -// network is an internal struct used to setup container networks. -type network struct { - configs.Network - - // TempVethPeerName is a unique tempory veth peer name that was placed into - // the container's namespace. - TempVethPeerName string `json:"temp_veth_peer_name"` -} - -// initConfig is used for transferring parameters from Exec() to Init() -type initConfig struct { - Args []string `json:"args"` - Env []string `json:"env"` - Cwd string `json:"cwd"` - Capabilities []string `json:"capabilities"` - User string `json:"user"` - Config *configs.Config `json:"config"` - Console string `json:"console"` - Networks []*network `json:"network"` - PassedFilesCount int `json:"passed_files_count"` -} - -type initer interface { - Init() error -} - -func newContainerInit(t initType, pipe *os.File) (initer, error) { - var config *initConfig - if err := json.NewDecoder(pipe).Decode(&config); err != nil { - return nil, err - } - if err := populateProcessEnvironment(config.Env); err != nil { - return nil, err - } - switch t { - case initSetns: - return &linuxSetnsInit{ - config: config, - }, nil - case initStandard: - return &linuxStandardInit{ - parentPid: syscall.Getppid(), - config: config, - }, nil - } - return nil, fmt.Errorf("unknown init type %q", t) -} - -// populateProcessEnvironment loads the provided environment variables into the -// current processes's environment. 
-func populateProcessEnvironment(env []string) error { - for _, pair := range env { - p := strings.SplitN(pair, "=", 2) - if len(p) < 2 { - return fmt.Errorf("invalid environment '%v'", pair) - } - if err := os.Setenv(p[0], p[1]); err != nil { - return err - } - } - return nil -} - -// finalizeNamespace drops the caps, sets the correct user -// and working dir, and closes any leaked file descriptors -// before executing the command inside the namespace -func finalizeNamespace(config *initConfig) error { - // Ensure that all unwanted fds we may have accidentally - // inherited are marked close-on-exec so they stay out of the - // container - if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil { - return err - } - - capabilities := config.Config.Capabilities - if config.Capabilities != nil { - capabilities = config.Capabilities - } - w, err := newCapWhitelist(capabilities) - if err != nil { - return err - } - // drop capabilities in bounding set before changing user - if err := w.dropBoundingSet(); err != nil { - return err - } - // preserve existing capabilities while we change users - if err := system.SetKeepCaps(); err != nil { - return err - } - if err := setupUser(config); err != nil { - return err - } - if err := system.ClearKeepCaps(); err != nil { - return err - } - // drop all other capabilities - if err := w.drop(); err != nil { - return err - } - if config.Cwd != "" { - if err := syscall.Chdir(config.Cwd); err != nil { - return err - } - } - return nil -} - -// joinExistingNamespaces gets all the namespace paths specified for the container and -// does a setns on the namespace fd so that the current process joins the namespace. -func joinExistingNamespaces(namespaces []configs.Namespace) error { - for _, ns := range namespaces { - if ns.Path != "" { - f, err := os.OpenFile(ns.Path, os.O_RDONLY, 0) - if err != nil { - return err - } - err = system.Setns(f.Fd(), uintptr(ns.Syscall())) - f.Close() - if err != nil { - return err - } - } - } - return nil -} - -// setupUser changes the groups, gid, and uid for the user inside the container -func setupUser(config *initConfig) error { - // Set up defaults. - defaultExecUser := user.ExecUser{ - Uid: syscall.Getuid(), - Gid: syscall.Getgid(), - Home: "/", - } - passwdPath, err := user.GetPasswdPath() - if err != nil { - return err - } - groupPath, err := user.GetGroupPath() - if err != nil { - return err - } - execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath) - if err != nil { - return err - } - - var addGroups []int - if len(config.Config.AdditionalGroups) > 0 { - addGroups, err = user.GetAdditionalGroupsPath(config.Config.AdditionalGroups, groupPath) - if err != nil { - return err - } - } - - suppGroups := append(execUser.Sgids, addGroups...) - if err := syscall.Setgroups(suppGroups); err != nil { - return err - } - - if err := system.Setgid(execUser.Gid); err != nil { - return err - } - if err := system.Setuid(execUser.Uid); err != nil { - return err - } - // if we didn't get HOME already, set it based on the user's HOME - if envHome := os.Getenv("HOME"); envHome == "" { - if err := os.Setenv("HOME", execUser.Home); err != nil { - return err - } - } - return nil -} - -// setupNetwork sets up and initializes any network interface inside the container. 
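//
// Editor's note: each network's Type selects a strategy ("veth" or
// "loopback" in this vendored copy); a hypothetical fragment of the init
// config it walks:
//
//    config.Networks = []*network{{Network: configs.Network{Type: "loopback"}}}
//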
-func setupNetwork(config *initConfig) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - if err := strategy.initialize(config); err != nil { - return err - } - } - return nil -} - -func setupRoute(config *configs.Config) error { - for _, config := range config.Routes { - if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil { - return err - } - } - return nil -} - -func setupRlimits(config *configs.Config) error { - for _, rlimit := range config.Rlimits { - l := &syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft} - if err := syscall.Setrlimit(rlimit.Type, l); err != nil { - return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err) - } - } - return nil -} - -// killCgroupProcesses freezes then iterates over all the processes inside the -// manager's cgroups sending a SIGKILL to each process then waiting for them to -// exit. -func killCgroupProcesses(m cgroups.Manager) error { - var procs []*os.Process - if err := m.Freeze(configs.Frozen); err != nil { - logrus.Warn(err) - } - pids, err := m.GetPids() - if err != nil { - m.Freeze(configs.Thawed) - return err - } - for _, pid := range pids { - if p, err := os.FindProcess(pid); err == nil { - procs = append(procs, p) - if err := p.Kill(); err != nil { - logrus.Warn(err) - } - } - } - if err := m.Freeze(configs.Thawed); err != nil { - logrus.Warn(err) - } - for _, p := range procs { - if _, err := p.Wait(); err != nil { - logrus.Warn(err) - } - } - return nil -} - -func finalizeSeccomp(config *initConfig) error { - if config.Config.Seccomp == nil { - return nil - } - context := seccomp.New() - for _, s := range config.Config.Seccomp.Syscalls { - ss := &seccomp.Syscall{ - Value: uint32(s.Value), - Action: seccompAction(s.Action), - } - if len(s.Args) > 0 { - ss.Args = seccompArgs(s.Args) - } - context.Add(ss) - } - return context.Load() -} - -func seccompAction(a configs.Action) seccomp.Action { - switch a { - case configs.Kill: - return seccomp.Kill - case configs.Trap: - return seccomp.Trap - case configs.Allow: - return seccomp.Allow - } - return seccomp.Error(syscall.Errno(int(a))) -} - -func seccompArgs(args []*configs.Arg) seccomp.Args { - var sa []seccomp.Arg - for _, a := range args { - sa = append(sa, seccomp.Arg{ - Index: uint32(a.Index), - Op: seccompOperator(a.Op), - Value: uint(a.Value), - }) - } - return seccomp.Args{sa} -} - -func seccompOperator(o configs.Operator) seccomp.Operator { - switch o { - case configs.EqualTo: - return seccomp.EqualTo - case configs.NotEqualTo: - return seccomp.NotEqualTo - case configs.GreatherThan: - return seccomp.GreatherThan - case configs.LessThan: - return seccomp.LessThan - case configs.MaskEqualTo: - return seccomp.MaskEqualTo - } - return 0 -} diff --git a/vendor/github.com/docker/libcontainer/netlink/MAINTAINERS b/vendor/github.com/docker/libcontainer/netlink/MAINTAINERS deleted file mode 100644 index 1cb55136..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Michael Crosby (@crosbymichael) -Guillaume J. Charmes (@creack) diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink.go b/vendor/github.com/docker/libcontainer/netlink/netlink.go deleted file mode 100644 index 90883660..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink.go +++ /dev/null @@ -1,31 +0,0 @@ -// Packet netlink provide access to low level Netlink sockets and messages. 
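//
// Editor's note: a typical round trip with this package, as a sketch with a
// hypothetical interface name:
//
//    iface, err := net.InterfaceByName("eth0")
//    if err == nil {
//        err = netlink.NetworkLinkUp(iface)
//    }
//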
-// -// Actual implementations are in: -// netlink_linux.go -// netlink_darwin.go -package netlink - -import ( - "errors" - "net" -) - -var ( - ErrWrongSockType = errors.New("Wrong socket type") - ErrShortResponse = errors.New("Got short response from netlink") - ErrInterfaceExists = errors.New("Network interface already exists") -) - -// A Route is a subnet associated with the interface to reach it. -type Route struct { - *net.IPNet - Iface *net.Interface - Default bool -} - -// An IfAddr defines IP network settings for a given network interface -type IfAddr struct { - Iface *net.Interface - IP net.IP - IPNet *net.IPNet -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/github.com/docker/libcontainer/netlink/netlink_linux.go deleted file mode 100644 index c81c269e..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_linux.go +++ /dev/null @@ -1,1321 +0,0 @@ -package netlink - -import ( - "encoding/binary" - "fmt" - "io" - "math/rand" - "net" - "os" - "sync/atomic" - "syscall" - "time" - "unsafe" -) - -const ( - IFNAMSIZ = 16 - DEFAULT_CHANGE = 0xFFFFFFFF - IFLA_INFO_KIND = 1 - IFLA_INFO_DATA = 2 - VETH_INFO_PEER = 1 - IFLA_MACVLAN_MODE = 1 - IFLA_VLAN_ID = 1 - IFLA_NET_NS_FD = 28 - IFLA_ADDRESS = 1 - IFLA_BRPORT_MODE = 4 - SIOC_BRADDBR = 0x89a0 - SIOC_BRDELBR = 0x89a1 - SIOC_BRADDIF = 0x89a2 - SIOC_BRDELIF = 0x89a3 -) - -const ( - MACVLAN_MODE_PRIVATE = 1 << iota - MACVLAN_MODE_VEPA - MACVLAN_MODE_BRIDGE - MACVLAN_MODE_PASSTHRU -) - -var nextSeqNr uint32 - -type ifreqHwaddr struct { - IfrnName [IFNAMSIZ]byte - IfruHwaddr syscall.RawSockaddr -} - -type ifreqIndex struct { - IfrnName [IFNAMSIZ]byte - IfruIndex int32 -} - -type ifreqFlags struct { - IfrnName [IFNAMSIZ]byte - Ifruflags uint16 -} - -var native binary.ByteOrder - -var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) - -func init() { - var x uint32 = 0x01020304 - if *(*byte)(unsafe.Pointer(&x)) == 0x01 { - native = binary.BigEndian - } else { - native = binary.LittleEndian - } -} - -func getIpFamily(ip net.IP) int { - if len(ip) <= net.IPv4len { - return syscall.AF_INET - } - if ip.To4() != nil { - return syscall.AF_INET - } - return syscall.AF_INET6 -} - -type NetlinkRequestData interface { - Len() int - ToWireFormat() []byte -} - -type IfInfomsg struct { - syscall.IfInfomsg -} - -func newIfInfomsg(family int) *IfInfomsg { - return &IfInfomsg{ - IfInfomsg: syscall.IfInfomsg{ - Family: uint8(family), - }, - } -} - -func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { - msg := newIfInfomsg(family) - parent.children = append(parent.children, msg) - return msg -} - -func (msg *IfInfomsg) ToWireFormat() []byte { - length := syscall.SizeofIfInfomsg - b := make([]byte, length) - b[0] = msg.Family - b[1] = 0 - native.PutUint16(b[2:4], msg.Type) - native.PutUint32(b[4:8], uint32(msg.Index)) - native.PutUint32(b[8:12], msg.Flags) - native.PutUint32(b[12:16], msg.Change) - return b -} - -func (msg *IfInfomsg) Len() int { - return syscall.SizeofIfInfomsg -} - -type IfAddrmsg struct { - syscall.IfAddrmsg -} - -func newIfAddrmsg(family int) *IfAddrmsg { - return &IfAddrmsg{ - IfAddrmsg: syscall.IfAddrmsg{ - Family: uint8(family), - }, - } -} - -func (msg *IfAddrmsg) ToWireFormat() []byte { - length := syscall.SizeofIfAddrmsg - b := make([]byte, length) - b[0] = msg.Family - b[1] = msg.Prefixlen - b[2] = msg.Flags - b[3] = msg.Scope - native.PutUint32(b[4:8], msg.Index) - return b -} - -func (msg *IfAddrmsg) Len() int { - return syscall.SizeofIfAddrmsg -} - -type RtMsg 
struct { - syscall.RtMsg -} - -func newRtMsg() *RtMsg { - return &RtMsg{ - RtMsg: syscall.RtMsg{ - Table: syscall.RT_TABLE_MAIN, - Scope: syscall.RT_SCOPE_UNIVERSE, - Protocol: syscall.RTPROT_BOOT, - Type: syscall.RTN_UNICAST, - }, - } -} - -func (msg *RtMsg) ToWireFormat() []byte { - length := syscall.SizeofRtMsg - b := make([]byte, length) - b[0] = msg.Family - b[1] = msg.Dst_len - b[2] = msg.Src_len - b[3] = msg.Tos - b[4] = msg.Table - b[5] = msg.Protocol - b[6] = msg.Scope - b[7] = msg.Type - native.PutUint32(b[8:12], msg.Flags) - return b -} - -func (msg *RtMsg) Len() int { - return syscall.SizeofRtMsg -} - -func rtaAlignOf(attrlen int) int { - return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) -} - -type RtAttr struct { - syscall.RtAttr - Data []byte - children []NetlinkRequestData -} - -func newRtAttr(attrType int, data []byte) *RtAttr { - return &RtAttr{ - RtAttr: syscall.RtAttr{ - Type: uint16(attrType), - }, - children: []NetlinkRequestData{}, - Data: data, - } -} - -func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { - attr := newRtAttr(attrType, data) - parent.children = append(parent.children, attr) - return attr -} - -func (a *RtAttr) Len() int { - if len(a.children) == 0 { - return (syscall.SizeofRtAttr + len(a.Data)) - } - - l := 0 - for _, child := range a.children { - l += child.Len() - } - l += syscall.SizeofRtAttr - return rtaAlignOf(l + len(a.Data)) -} - -func (a *RtAttr) ToWireFormat() []byte { - length := a.Len() - buf := make([]byte, rtaAlignOf(length)) - - if a.Data != nil { - copy(buf[4:], a.Data) - } else { - next := 4 - for _, child := range a.children { - childBuf := child.ToWireFormat() - copy(buf[next:], childBuf) - next += rtaAlignOf(len(childBuf)) - } - } - - if l := uint16(length); l != 0 { - native.PutUint16(buf[0:2], l) - } - native.PutUint16(buf[2:4], a.Type) - return buf -} - -func uint32Attr(t int, n uint32) *RtAttr { - buf := make([]byte, 4) - native.PutUint32(buf, n) - return newRtAttr(t, buf) -} - -type NetlinkRequest struct { - syscall.NlMsghdr - Data []NetlinkRequestData -} - -func (rr *NetlinkRequest) ToWireFormat() []byte { - length := rr.Len - dataBytes := make([][]byte, len(rr.Data)) - for i, data := range rr.Data { - dataBytes[i] = data.ToWireFormat() - length += uint32(len(dataBytes[i])) - } - b := make([]byte, length) - native.PutUint32(b[0:4], length) - native.PutUint16(b[4:6], rr.Type) - native.PutUint16(b[6:8], rr.Flags) - native.PutUint32(b[8:12], rr.Seq) - native.PutUint32(b[12:16], rr.Pid) - - next := 16 - for _, data := range dataBytes { - copy(b[next:], data) - next += len(data) - } - return b -} - -func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { - if data != nil { - rr.Data = append(rr.Data, data) - } -} - -func newNetlinkRequest(proto, flags int) *NetlinkRequest { - return &NetlinkRequest{ - NlMsghdr: syscall.NlMsghdr{ - Len: uint32(syscall.NLMSG_HDRLEN), - Type: uint16(proto), - Flags: syscall.NLM_F_REQUEST | uint16(flags), - Seq: atomic.AddUint32(&nextSeqNr, 1), - }, - } -} - -type NetlinkSocket struct { - fd int - lsa syscall.SockaddrNetlink -} - -func getNetlinkSocket() (*NetlinkSocket, error) { - fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE) - if err != nil { - return nil, err - } - s := &NetlinkSocket{ - fd: fd, - } - s.lsa.Family = syscall.AF_NETLINK - if err := syscall.Bind(fd, &s.lsa); err != nil { - syscall.Close(fd) - return nil, err - } - - return s, nil -} - -func (s *NetlinkSocket) Close() { - syscall.Close(s.fd) -} - 
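//
// Editor's note: the socket and message types above combine into the
// request/ack pattern used by nearly every exported function below (sketch):
//
//    s, err := getNetlinkSocket()
//    if err != nil {
//        return err
//    }
//    defer s.Close()
//    wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
//    wb.AddData(newIfInfomsg(syscall.AF_UNSPEC))
//    if err := s.Send(wb); err != nil {
//        return err
//    }
//    return s.HandleAck(wb.Seq)
//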
-func (s *NetlinkSocket) Send(request *NetlinkRequest) error { - if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil { - return err - } - return nil -} - -func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { - rb := make([]byte, syscall.Getpagesize()) - nr, _, err := syscall.Recvfrom(s.fd, rb, 0) - if err != nil { - return nil, err - } - if nr < syscall.NLMSG_HDRLEN { - return nil, ErrShortResponse - } - rb = rb[:nr] - return syscall.ParseNetlinkMessage(rb) -} - -func (s *NetlinkSocket) GetPid() (uint32, error) { - lsa, err := syscall.Getsockname(s.fd) - if err != nil { - return 0, err - } - switch v := lsa.(type) { - case *syscall.SockaddrNetlink: - return v.Pid, nil - } - return 0, ErrWrongSockType -} - -func (s *NetlinkSocket) CheckMessage(m syscall.NetlinkMessage, seq, pid uint32) error { - if m.Header.Seq != seq { - return fmt.Errorf("netlink: invalid seq %d, expected %d", m.Header.Seq, seq) - } - if m.Header.Pid != pid { - return fmt.Errorf("netlink: wrong pid %d, expected %d", m.Header.Pid, pid) - } - if m.Header.Type == syscall.NLMSG_DONE { - return io.EOF - } - if m.Header.Type == syscall.NLMSG_ERROR { - e := int32(native.Uint32(m.Data[0:4])) - if e == 0 { - return io.EOF - } - return syscall.Errno(-e) - } - return nil -} - -func (s *NetlinkSocket) HandleAck(seq uint32) error { - pid, err := s.GetPid() - if err != nil { - return err - } - -outer: - for { - msgs, err := s.Receive() - if err != nil { - return err - } - for _, m := range msgs { - if err := s.CheckMessage(m, seq, pid); err != nil { - if err == io.EOF { - break outer - } - return err - } - } - } - - return nil -} - -func zeroTerminated(s string) []byte { - return []byte(s + "\000") -} - -func nonZeroTerminated(s string) []byte { - return []byte(s) -} - -// Add a new network link of a specified type. -// This is identical to running: ip link add $name type $linkType -func NetworkLinkAdd(name string, linkType string) error { - if name == "" || linkType == "" { - return fmt.Errorf("Neither link name nor link type can be empty!") - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - linkInfo := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(linkInfo, IFLA_INFO_KIND, nonZeroTerminated(linkType)) - wb.AddData(linkInfo) - - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) - wb.AddData(nameData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Delete a network link. -// This is identical to running: ip link del $name -func NetworkLinkDel(name string) error { - if name == "" { - return fmt.Errorf("Network link name can not be empty!") - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - - wb := newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Bring up a particular network interface. 
-// This is identical to running: ip link set dev $name up -func NetworkLinkUp(iface *net.Interface) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Flags = syscall.IFF_UP - msg.Change = syscall.IFF_UP - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Bring down a particular network interface. -// This is identical to running: ip link set $name down -func NetworkLinkDown(iface *net.Interface) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Flags = 0 & ^syscall.IFF_UP - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Set link layer address ie. MAC Address. -// This is identical to running: ip link set dev $name address $macaddress -func NetworkSetMacAddress(iface *net.Interface, macaddr string) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - hwaddr, err := net.ParseMAC(macaddr) - if err != nil { - return err - } - - var ( - MULTICAST byte = 0x1 - ) - - if hwaddr[0]&0x1 == MULTICAST { - return fmt.Errorf("Multicast MAC Address is not supported: %s", macaddr) - } - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - macdata := make([]byte, 6) - copy(macdata, hwaddr) - data := newRtAttr(IFLA_ADDRESS, macdata) - wb.AddData(data) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// Set link Maximum Transmission Unit -// This is identical to running: ip link set dev $name mtu $MTU -// bridge is a bitch here https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=292088 -// https://bugzilla.redhat.com/show_bug.cgi?id=697021 -// There is a discussion about how to deal with ifcs joining bridge with MTU > 1500 -// Regular network nterfaces do seem to work though! 
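//
// Editor's sketch, equivalent to `ip link set dev eth0 mtu 1400` with a
// hypothetical interface name:
//
//    iface, err := net.InterfaceByName("eth0")
//    if err == nil {
//        err = netlink.NetworkSetMTU(iface, 1400)
//    }
//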
-func NetworkSetMTU(iface *net.Interface, mtu int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - wb.AddData(uint32Attr(syscall.IFLA_MTU, uint32(mtu))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// Set link queue length -// This is identical to running: ip link set dev $name txqueuelen $QLEN -func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - wb.AddData(uint32Attr(syscall.IFLA_TXQLEN, uint32(txQueueLen))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -func networkMasterAction(iface *net.Interface, rtattr *RtAttr) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - wb.AddData(rtattr) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Add an interface to bridge. -// This is identical to running: ip link set $name master $master -func NetworkSetMaster(iface, master *net.Interface) error { - data := uint32Attr(syscall.IFLA_MASTER, uint32(master.Index)) - return networkMasterAction(iface, data) -} - -// Remove an interface from the bridge -// This is is identical to to running: ip link $name set nomaster -func NetworkSetNoMaster(iface *net.Interface) error { - data := uint32Attr(syscall.IFLA_MASTER, 0) - return networkMasterAction(iface, data) -} - -func networkSetNsAction(iface *net.Interface, rtattr *RtAttr) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - wb.AddData(msg) - wb.AddData(rtattr) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Move a particular network interface to a particular network namespace -// specified by PID. This is identical to running: ip link set dev $name netns $pid -func NetworkSetNsPid(iface *net.Interface, nspid int) error { - data := uint32Attr(syscall.IFLA_NET_NS_PID, uint32(nspid)) - return networkSetNsAction(iface, data) -} - -// Move a particular network interface to a particular mounted -// network namespace specified by file descriptor. -// This is idential to running: ip link set dev $name netns $fd -func NetworkSetNsFd(iface *net.Interface, fd int) error { - data := uint32Attr(IFLA_NET_NS_FD, uint32(fd)) - return networkSetNsAction(iface, data) -} - -// Rename a particular interface to a different name -// !!! Note that you can't rename an active interface. You need to bring it down before renaming it. 
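//
// Editor's sketch of the down-then-rename sequence that note implies (names
// hypothetical):
//
//    iface, _ := net.InterfaceByName("eth0")
//    _ = netlink.NetworkLinkDown(iface)
//    _ = netlink.NetworkChangeName(iface, "net0")
//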
-// This is identical to running: ip link set dev ${oldName} name ${newName} -func NetworkChangeName(iface *net.Interface, newName string) error { - if len(newName) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", newName) - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(newName)) - wb.AddData(nameData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Add a new VETH pair link on the host -// This is identical to running: ip link add name $name type veth peer name $peername -func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) - wb.AddData(nameData) - - txqLen := make([]byte, 4) - native.PutUint32(txqLen, uint32(txQueueLen)) - txqData := newRtAttr(syscall.IFLA_TXQLEN, txqLen) - wb.AddData(txqData) - - nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) - nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) - nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) - - newIfInfomsgChild(nest3, syscall.AF_UNSPEC) - newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) - - txqLen2 := make([]byte, 4) - native.PutUint32(txqLen2, uint32(txQueueLen)) - newRtAttrChild(nest3, syscall.IFLA_TXQLEN, txqLen2) - - wb.AddData(nest1) - - if err := s.Send(wb); err != nil { - return err - } - - if err := s.HandleAck(wb.Seq); err != nil { - if os.IsExist(err) { - return ErrInterfaceExists - } - - return err - } - - return nil -} - -// Add a new VLAN interface with masterDev as its upper device -// This is identical to running: -// ip link add name $name link $masterdev type vlan id $id -func NetworkLinkAddVlan(masterDev, vlanDev string, vlanId uint16) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - masterDevIfc, err := net.InterfaceByName(masterDev) - if err != nil { - return err - } - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("vlan")) - - nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) - vlanData := make([]byte, 2) - native.PutUint16(vlanData, vlanId) - newRtAttrChild(nest2, IFLA_VLAN_ID, vlanData) - wb.AddData(nest1) - - wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) - wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(vlanDev))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// MacVlan link has LowerDev, UpperDev and operates in Mode mode -// This simplifies the code when creating MacVlan or MacVtap interface -type MacVlanLink struct { - MasterDev string - SlaveDev string - mode string -} - -func (m MacVlanLink) Mode() uint32 { - modeMap := map[string]uint32{ - "private": MACVLAN_MODE_PRIVATE, - "vepa": 
MACVLAN_MODE_VEPA, - "bridge": MACVLAN_MODE_BRIDGE, - "passthru": MACVLAN_MODE_PASSTHRU, - } - - return modeMap[m.mode] -} - -// Add MAC VLAN network interface with masterDev as its upper device -// This is identical to running: -// ip link add name $name link $masterdev type macvlan mode $mode -func networkLinkMacVlan(dev_type string, mcvln *MacVlanLink) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - masterDevIfc, err := net.InterfaceByName(mcvln.MasterDev) - if err != nil { - return err - } - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated(dev_type)) - - nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) - macVlanData := make([]byte, 4) - native.PutUint32(macVlanData, mcvln.Mode()) - newRtAttrChild(nest2, IFLA_MACVLAN_MODE, macVlanData) - wb.AddData(nest1) - - wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) - wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(mcvln.SlaveDev))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { - return networkLinkMacVlan("macvlan", &MacVlanLink{ - MasterDev: masterDev, - SlaveDev: macVlanDev, - mode: mode, - }) -} - -func NetworkLinkAddMacVtap(masterDev, macVlanDev string, mode string) error { - return networkLinkMacVlan("macvtap", &MacVlanLink{ - MasterDev: masterDev, - SlaveDev: macVlanDev, - mode: mode, - }) -} - -func networkLinkIpAction(action, flags int, ifa IfAddr) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - family := getIpFamily(ifa.IP) - - wb := newNetlinkRequest(action, flags) - - msg := newIfAddrmsg(family) - msg.Index = uint32(ifa.Iface.Index) - prefixLen, _ := ifa.IPNet.Mask.Size() - msg.Prefixlen = uint8(prefixLen) - wb.AddData(msg) - - var ipData []byte - if family == syscall.AF_INET { - ipData = ifa.IP.To4() - } else { - ipData = ifa.IP.To16() - } - - localData := newRtAttr(syscall.IFA_LOCAL, ipData) - wb.AddData(localData) - - addrData := newRtAttr(syscall.IFA_ADDRESS, ipData) - wb.AddData(addrData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Delete an IP address from an interface. This is identical to: -// ip addr del $ip/$ipNet dev $iface -func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return networkLinkIpAction( - syscall.RTM_DELADDR, - syscall.NLM_F_ACK, - IfAddr{iface, ip, ipNet}, - ) -} - -// Add an Ip address to an interface. 
This is identical to: -// ip addr add $ip/$ipNet dev $iface -func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return networkLinkIpAction( - syscall.RTM_NEWADDR, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK, - IfAddr{iface, ip, ipNet}, - ) -} - -// Returns an array of IPNet for all the currently routed subnets on ipv4 -// This is similar to the first column of "ip route" output -func NetworkGetRoutes() ([]Route, error) { - s, err := getNetlinkSocket() - if err != nil { - return nil, err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return nil, err - } - - pid, err := s.GetPid() - if err != nil { - return nil, err - } - - res := make([]Route, 0) - -outer: - for { - msgs, err := s.Receive() - if err != nil { - return nil, err - } - for _, m := range msgs { - if err := s.CheckMessage(m, wb.Seq, pid); err != nil { - if err == io.EOF { - break outer - } - return nil, err - } - if m.Header.Type != syscall.RTM_NEWROUTE { - continue - } - - var r Route - - msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) - - if msg.Flags&syscall.RTM_F_CLONED != 0 { - // Ignore cloned routes - continue - } - - if msg.Table != syscall.RT_TABLE_MAIN { - // Ignore non-main tables - continue - } - - if msg.Family != syscall.AF_INET { - // Ignore non-ipv4 routes - continue - } - - if msg.Dst_len == 0 { - // Default routes - r.Default = true - } - - attrs, err := syscall.ParseNetlinkRouteAttr(&m) - if err != nil { - return nil, err - } - for _, attr := range attrs { - switch attr.Attr.Type { - case syscall.RTA_DST: - ip := attr.Value - r.IPNet = &net.IPNet{ - IP: ip, - Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), - } - case syscall.RTA_OIF: - index := int(native.Uint32(attr.Value[0:4])) - r.Iface, _ = net.InterfaceByIndex(index) - } - } - if r.Default || r.IPNet != nil { - res = append(res, r) - } - } - } - - return res, nil -} - -// Add a new route table entry. 
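//
// Editor's sketch, equivalent to `ip route add 10.0.0.0/8 via 10.1.2.3 dev eth0`
// (addresses hypothetical):
//
//    _ = netlink.AddRoute("10.0.0.0/8", "", "10.1.2.3", "eth0")
//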
-func AddRoute(destination, source, gateway, device string) error { - if destination == "" && source == "" && gateway == "" { - return fmt.Errorf("one of destination, source or gateway must not be blank") - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - msg := newRtMsg() - currentFamily := -1 - var rtAttrs []*RtAttr - - if destination != "" { - destIP, destNet, err := net.ParseCIDR(destination) - if err != nil { - return fmt.Errorf("destination CIDR %s couldn't be parsed", destination) - } - destFamily := getIpFamily(destIP) - currentFamily = destFamily - destLen, bits := destNet.Mask.Size() - if destLen == 0 && bits == 0 { - return fmt.Errorf("destination CIDR %s generated a non-canonical Mask", destination) - } - msg.Family = uint8(destFamily) - msg.Dst_len = uint8(destLen) - var destData []byte - if destFamily == syscall.AF_INET { - destData = destIP.To4() - } else { - destData = destIP.To16() - } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_DST, destData)) - } - - if source != "" { - srcIP := net.ParseIP(source) - if srcIP == nil { - return fmt.Errorf("source IP %s couldn't be parsed", source) - } - srcFamily := getIpFamily(srcIP) - if currentFamily != -1 && currentFamily != srcFamily { - return fmt.Errorf("source and destination ip were not the same IP family") - } - currentFamily = srcFamily - msg.Family = uint8(srcFamily) - var srcData []byte - if srcFamily == syscall.AF_INET { - srcData = srcIP.To4() - } else { - srcData = srcIP.To16() - } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_PREFSRC, srcData)) - } - - if gateway != "" { - gwIP := net.ParseIP(gateway) - if gwIP == nil { - return fmt.Errorf("gateway IP %s couldn't be parsed", gateway) - } - gwFamily := getIpFamily(gwIP) - if currentFamily != -1 && currentFamily != gwFamily { - return fmt.Errorf("gateway, source, and destination ip were not the same IP family") - } - msg.Family = uint8(gwFamily) - var gwData []byte - if gwFamily == syscall.AF_INET { - gwData = gwIP.To4() - } else { - gwData = gwIP.To16() - } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_GATEWAY, gwData)) - } - - wb.AddData(msg) - for _, attr := range rtAttrs { - wb.AddData(attr) - } - - iface, err := net.InterfaceByName(device) - if err != nil { - return err - } - wb.AddData(uint32Attr(syscall.RTA_OIF, uint32(iface.Index))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// Add a new default gateway. Identical to: -// ip route add default via $ip -func AddDefaultGw(ip, device string) error { - return AddRoute("", "", ip, device) -} - -// THIS CODE DOES NOT COMMUNICATE WITH KERNEL VIA RTNETLINK INTERFACE -// IT IS HERE FOR BACKWARDS COMPATIBILITY WITH OLDER LINUX KERNELS -// WHICH SHIP WITH OLDER NOT ENTIRELY FUNCTIONAL VERSION OF NETLINK -func getIfSocket() (fd int, err error) { - for _, socket := range []int{ - syscall.AF_INET, - syscall.AF_PACKET, - syscall.AF_INET6, - } { - if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { - break - } - } - if err == nil { - return fd, nil - } - return -1, err -} - -// Create the actual bridge device. This is more backward-compatible than -// netlink.NetworkLinkAdd and works on RHEL 6. 
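//
// Editor's sketch, roughly `brctl addbr br0` plus a random locally
// administered MAC:
//
//    _ = netlink.CreateBridge("br0", true)
//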
-func CreateBridge(name string, setMacAddr bool) error { - if len(name) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", name) - } - - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return err - } - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return err - } - if setMacAddr { - return SetMacAddress(name, randMacAddr()) - } - return nil -} - -// Delete the actual bridge device. -func DeleteBridge(name string) error { - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return err - } - - var ifr ifreqFlags - copy(ifr.IfrnName[:len(ifr.IfrnName)-1], []byte(name)) - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), - syscall.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifr))); err != 0 { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), - SIOC_BRDELBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return err - } - return nil -} - -func ifIoctBridge(iface, master *net.Interface, op uintptr) error { - if len(master.Name) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", master.Name) - } - - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - ifr := ifreqIndex{} - copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name) - ifr.IfruIndex = int32(iface.Index) - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), op, uintptr(unsafe.Pointer(&ifr))); err != 0 { - return err - } - - return nil -} - -// Add a slave to a bridge device. This is more backward-compatible than -// netlink.NetworkSetMaster and works on RHEL 6. -func AddToBridge(iface, master *net.Interface) error { - return ifIoctBridge(iface, master, SIOC_BRADDIF) -} - -// Detach a slave from a bridge device. This is more backward-compatible than -// netlink.NetworkSetMaster and works on RHEL 6. 
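//
// Editor's sketch (names hypothetical):
//
//    br, _ := net.InterfaceByName("br0")
//    eth, _ := net.InterfaceByName("eth0")
//    _ = netlink.DelFromBridge(eth, br)
//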
-func DelFromBridge(iface, master *net.Interface) error { - return ifIoctBridge(iface, master, SIOC_BRDELIF) -} - -func randMacAddr() string { - hw := make(net.HardwareAddr, 6) - for i := 0; i < 6; i++ { - hw[i] = byte(rnd.Intn(255)) - } - hw[0] &^= 0x1 // clear multicast bit - hw[0] |= 0x2 // set local assignment bit (IEEE802) - return hw.String() -} - -func SetMacAddress(name, addr string) error { - if len(name) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", name) - } - - hw, err := net.ParseMAC(addr) - if err != nil { - return err - } - - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - ifr := ifreqHwaddr{} - ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER - copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name) - - for i := 0; i < 6; i++ { - ifr.IfruHwaddr.Data[i] = ifrDataByte(hw[i]) - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 { - return err - } - return nil -} - -func SetHairpinMode(iface *net.Interface, enabled bool) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - req := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_BRIDGE) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - req.AddData(msg) - - mode := []byte{0} - if enabled { - mode[0] = byte(1) - } - - br := newRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) - newRtAttrChild(br, IFLA_BRPORT_MODE, mode) - req.AddData(br) - if err := s.Send(req); err != nil { - return err - } - - return s.HandleAck(req.Seq) -} - -func ChangeName(iface *net.Interface, newName string) error { - if len(newName) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", newName) - } - - fd, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(fd) - - data := [IFNAMSIZ * 2]byte{} - // the "-1"s here are very important for ensuring we get proper null - // termination of our new C strings - copy(data[:IFNAMSIZ-1], iface.Name) - copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) - - if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { - return errno - } - - return nil -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_armppc64.go b/vendor/github.com/docker/libcontainer/netlink/netlink_linux_armppc64.go deleted file mode 100644 index 965e0bfb..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_armppc64.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build arm ppc64 ppc64le - -package netlink - -func ifrDataByte(b byte) uint8 { - return uint8(b) -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go b/vendor/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go deleted file mode 100644 index 74462798..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !arm,!ppc64,!ppc64le - -package netlink - -func ifrDataByte(b byte) int8 { - return int8(b) -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/vendor/github.com/docker/libcontainer/netlink/netlink_linux_test.go deleted file mode 100644 index 3f6511ab..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_test.go +++ /dev/null @@ -1,408 +0,0 @@ -package netlink - -import ( - "net" - 
"strings" - "syscall" - "testing" -) - -type testLink struct { - name string - linkType string -} - -func addLink(t *testing.T, name string, linkType string) { - if err := NetworkLinkAdd(name, linkType); err != nil { - t.Fatalf("Unable to create %s link: %s", name, err) - } -} - -func readLink(t *testing.T, name string) *net.Interface { - iface, err := net.InterfaceByName(name) - if err != nil { - t.Fatalf("Could not find %s interface: %s", name, err) - } - - return iface -} - -func deleteLink(t *testing.T, name string) { - if err := NetworkLinkDel(name); err != nil { - t.Fatalf("Unable to delete %s link: %s", name, err) - } -} - -func upLink(t *testing.T, name string) { - iface := readLink(t, name) - if err := NetworkLinkUp(iface); err != nil { - t.Fatalf("Could not bring UP %#v interface: %s", iface, err) - } -} - -func downLink(t *testing.T, name string) { - iface := readLink(t, name) - if err := NetworkLinkDown(iface); err != nil { - t.Fatalf("Could not bring DOWN %#v interface: %s", iface, err) - } -} - -func ipAssigned(iface *net.Interface, ip net.IP) bool { - addrs, _ := iface.Addrs() - - for _, addr := range addrs { - args := strings.SplitN(addr.String(), "/", 2) - if args[0] == ip.String() { - return true - } - } - - return false -} - -func TestNetworkLinkAddDel(t *testing.T) { - if testing.Short() { - return - } - - testLinks := []testLink{ - {"tstEth", "dummy"}, - {"tstBr", "bridge"}, - } - - for _, tl := range testLinks { - addLink(t, tl.name, tl.linkType) - defer deleteLink(t, tl.name) - readLink(t, tl.name) - } -} - -func TestNetworkLinkUpDown(t *testing.T) { - if testing.Short() { - return - } - - tl := testLink{name: "tstEth", linkType: "dummy"} - - addLink(t, tl.name, tl.linkType) - defer deleteLink(t, tl.name) - - upLink(t, tl.name) - ifcAfterUp := readLink(t, tl.name) - - if (ifcAfterUp.Flags & syscall.IFF_UP) != syscall.IFF_UP { - t.Fatalf("Could not bring UP %#v initerface", tl) - } - - downLink(t, tl.name) - ifcAfterDown := readLink(t, tl.name) - - if (ifcAfterDown.Flags & syscall.IFF_UP) == syscall.IFF_UP { - t.Fatalf("Could not bring DOWN %#v initerface", tl) - } -} - -func TestNetworkSetMacAddress(t *testing.T) { - if testing.Short() { - return - } - - tl := testLink{name: "tstEth", linkType: "dummy"} - macaddr := "22:ce:e0:99:63:6f" - - addLink(t, tl.name, tl.linkType) - defer deleteLink(t, tl.name) - - ifcBeforeSet := readLink(t, tl.name) - - if err := NetworkSetMacAddress(ifcBeforeSet, macaddr); err != nil { - t.Fatalf("Could not set %s MAC address on %#v interface: %s", macaddr, tl, err) - } - - ifcAfterSet := readLink(t, tl.name) - - if ifcAfterSet.HardwareAddr.String() != macaddr { - t.Fatalf("Could not set %s MAC address on %#v interface", macaddr, tl) - } -} - -func TestNetworkSetMTU(t *testing.T) { - if testing.Short() { - return - } - - tl := testLink{name: "tstEth", linkType: "dummy"} - mtu := 1400 - - addLink(t, tl.name, tl.linkType) - defer deleteLink(t, tl.name) - - ifcBeforeSet := readLink(t, tl.name) - - if err := NetworkSetMTU(ifcBeforeSet, mtu); err != nil { - t.Fatalf("Could not set %d MTU on %#v interface: %s", mtu, tl, err) - } - - ifcAfterSet := readLink(t, tl.name) - - if ifcAfterSet.MTU != mtu { - t.Fatalf("Could not set %d MTU on %#v interface", mtu, tl) - } -} - -func TestNetworkSetMasterNoMaster(t *testing.T) { - if testing.Short() { - return - } - - master := testLink{"tstBr", "bridge"} - slave := testLink{"tstEth", "dummy"} - testLinks := []testLink{master, slave} - - for _, tl := range testLinks { - addLink(t, tl.name, tl.linkType) - 
defer deleteLink(t, tl.name) - upLink(t, tl.name) - } - - masterIfc := readLink(t, master.name) - slaveIfc := readLink(t, slave.name) - if err := NetworkSetMaster(slaveIfc, masterIfc); err != nil { - t.Fatalf("Could not set %#v to be the master of %#v: %s", master, slave, err) - } - - // Trying to figure out a way to test which will not break on RHEL6. - // We could check for existence of /sys/class/net/tstEth/upper_tstBr - // which should point to the ../tstBr which is the UPPER device i.e. network bridge - - if err := NetworkSetNoMaster(slaveIfc); err != nil { - t.Fatalf("Could not UNset %#v master of %#v: %s", master, slave, err) - } -} - -func TestNetworkChangeName(t *testing.T) { - if testing.Short() { - return - } - - tl := testLink{"tstEth", "dummy"} - newName := "newTst" - - addLink(t, tl.name, tl.linkType) - - linkIfc := readLink(t, tl.name) - if err := NetworkChangeName(linkIfc, newName); err != nil { - deleteLink(t, tl.name) - t.Fatalf("Could not change %#v interface name to %s: %s", tl, newName, err) - } - - readLink(t, newName) - deleteLink(t, newName) -} - -func TestNetworkLinkAddVlan(t *testing.T) { - if testing.Short() { - return - } - - tl := struct { - name string - id uint16 - }{ - name: "tstVlan", - id: 32, - } - masterLink := testLink{"tstEth", "dummy"} - - addLink(t, masterLink.name, masterLink.linkType) - defer deleteLink(t, masterLink.name) - - if err := NetworkLinkAddVlan(masterLink.name, tl.name, tl.id); err != nil { - t.Fatalf("Unable to create %#v VLAN interface: %s", tl, err) - } - - readLink(t, tl.name) -} - -func TestNetworkLinkAddMacVlan(t *testing.T) { - if testing.Short() { - return - } - - tl := struct { - name string - mode string - }{ - name: "tstVlan", - mode: "private", - } - masterLink := testLink{"tstEth", "dummy"} - - addLink(t, masterLink.name, masterLink.linkType) - defer deleteLink(t, masterLink.name) - - if err := NetworkLinkAddMacVlan(masterLink.name, tl.name, tl.mode); err != nil { - t.Fatalf("Unable to create %#v MAC VLAN interface: %s", tl, err) - } - - readLink(t, tl.name) -} - -func TestNetworkLinkAddMacVtap(t *testing.T) { - if testing.Short() { - return - } - - tl := struct { - name string - mode string - }{ - name: "tstVtap", - mode: "private", - } - masterLink := testLink{"tstEth", "dummy"} - - addLink(t, masterLink.name, masterLink.linkType) - defer deleteLink(t, masterLink.name) - - if err := NetworkLinkAddMacVtap(masterLink.name, tl.name, tl.mode); err != nil { - t.Fatalf("Unable to create %#v MAC VTAP interface: %s", tl, err) - } - - readLink(t, tl.name) -} - -func TestAddDelNetworkIp(t *testing.T) { - if testing.Short() { - return - } - - ifaceName := "lo" - ip := net.ParseIP("127.0.1.1") - mask := net.IPv4Mask(255, 255, 255, 255) - ipNet := &net.IPNet{IP: ip, Mask: mask} - - iface, err := net.InterfaceByName(ifaceName) - if err != nil { - t.Skip("No 'lo' interface; skipping tests") - } - - if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil { - t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err) - } - - if !ipAssigned(iface, ip) { - t.Fatalf("Could not locate address '%s' in lo address list.", ip.String()) - } - - if err := NetworkLinkDelIp(iface, ip, ipNet); err != nil { - t.Fatalf("Could not delete IP address %s from interface %#v: %s", ip.String(), iface, err) - } - - if ipAssigned(iface, ip) { - t.Fatalf("Located address '%s' in lo address list after removal.", ip.String()) - } -} - -func TestAddRouteSourceSelection(t *testing.T) { - tstIp := "127.1.1.1" - tl := testLink{name: "tstEth", 
linkType: "dummy"} - - addLink(t, tl.name, tl.linkType) - defer deleteLink(t, tl.name) - - ip := net.ParseIP(tstIp) - mask := net.IPv4Mask(255, 255, 255, 255) - ipNet := &net.IPNet{IP: ip, Mask: mask} - - iface, err := net.InterfaceByName(tl.name) - if err != nil { - t.Fatalf("Lost created link %#v", tl) - } - - if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil { - t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err) - } - - upLink(t, tl.name) - defer downLink(t, tl.name) - - if err := AddRoute("127.0.0.0/8", tstIp, "", tl.name); err != nil { - t.Fatalf("Failed to add route with source address") - } -} - -func TestCreateVethPair(t *testing.T) { - if testing.Short() { - return - } - - var ( - name1 = "veth1" - name2 = "veth2" - ) - - if err := NetworkCreateVethPair(name1, name2, 0); err != nil { - t.Fatalf("Could not create veth pair %s %s: %s", name1, name2, err) - } - defer NetworkLinkDel(name1) - - readLink(t, name1) - readLink(t, name2) -} - -// -// netlink package tests which do not use RTNETLINK -// -func TestCreateBridgeWithMac(t *testing.T) { - if testing.Short() { - return - } - - name := "testbridge" - - if err := CreateBridge(name, true); err != nil { - t.Fatal(err) - } - - if _, err := net.InterfaceByName(name); err != nil { - t.Fatal(err) - } - - // cleanup and tests - - if err := DeleteBridge(name); err != nil { - t.Fatal(err) - } - - if _, err := net.InterfaceByName(name); err == nil { - t.Fatalf("expected error getting interface because %s bridge was deleted", name) - } -} - -func TestSetMacAddress(t *testing.T) { - if testing.Short() { - return - } - - name := "testmac" - mac := randMacAddr() - - if err := NetworkLinkAdd(name, "bridge"); err != nil { - t.Fatal(err) - } - defer NetworkLinkDel(name) - - if err := SetMacAddress(name, mac); err != nil { - t.Fatal(err) - } - - iface, err := net.InterfaceByName(name) - if err != nil { - t.Fatal(err) - } - - if iface.HardwareAddr.String() != mac { - t.Fatalf("mac address %q does not match %q", iface.HardwareAddr, mac) - } -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/vendor/github.com/docker/libcontainer/netlink/netlink_unsupported.go deleted file mode 100644 index 4b11bf8b..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_unsupported.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build !linux - -package netlink - -import ( - "errors" - "net" -) - -var ( - ErrNotImplemented = errors.New("not implemented") -) - -func NetworkGetRoutes() ([]Route, error) { - return nil, ErrNotImplemented -} - -func NetworkLinkAdd(name string, linkType string) error { - return ErrNotImplemented -} - -func NetworkLinkDel(name string) error { - return ErrNotImplemented -} - -func NetworkLinkUp(iface *net.Interface) error { - return ErrNotImplemented -} - -func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return ErrNotImplemented -} - -func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return ErrNotImplemented -} - -func AddRoute(destination, source, gateway, device string) error { - return ErrNotImplemented -} - -func AddDefaultGw(ip, device string) error { - return ErrNotImplemented -} - -func NetworkSetMTU(iface *net.Interface, mtu int) error { - return ErrNotImplemented -} - -func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { - return ErrNotImplemented -} - -func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { - return ErrNotImplemented -} - -func 
NetworkChangeName(iface *net.Interface, newName string) error { - return ErrNotImplemented -} - -func NetworkSetNsFd(iface *net.Interface, fd int) error { - return ErrNotImplemented -} - -func NetworkSetNsPid(iface *net.Interface, nspid int) error { - return ErrNotImplemented -} - -func NetworkSetMaster(iface, master *net.Interface) error { - return ErrNotImplemented -} - -func NetworkLinkDown(iface *net.Interface) error { - return ErrNotImplemented -} - -func CreateBridge(name string, setMacAddr bool) error { - return ErrNotImplemented -} - -func DeleteBridge(name string) error { - return ErrNotImplemented -} - -func AddToBridge(iface, master *net.Interface) error { - return ErrNotImplemented -} diff --git a/vendor/github.com/docker/libcontainer/network_linux.go b/vendor/github.com/docker/libcontainer/network_linux.go deleted file mode 100644 index b88009ff..00000000 --- a/vendor/github.com/docker/libcontainer/network_linux.go +++ /dev/null @@ -1,248 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io/ioutil" - "net" - "path/filepath" - "strconv" - "strings" - - "github.com/docker/libcontainer/configs" - "github.com/docker/libcontainer/netlink" - "github.com/docker/libcontainer/utils" -) - -var strategies = map[string]networkStrategy{ - "veth": &veth{}, - "loopback": &loopback{}, -} - -// networkStrategy represents a specific network configuration for -// a container's networking stack -type networkStrategy interface { - create(*network, int) error - initialize(*network) error - detach(*configs.Network) error - attach(*configs.Network) error -} - -// getStrategy returns the specific network strategy for the -// provided type. -func getStrategy(tpe string) (networkStrategy, error) { - s, exists := strategies[tpe] - if !exists { - return nil, fmt.Errorf("unknown strategy type %q", tpe) - } - return s, nil -} - -// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. -func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) { - out := &NetworkInterface{Name: interfaceName} - // This can happen if the network runtime information is missing - possible if the - // container was created by an old version of libcontainer. - if interfaceName == "" { - return out, nil - } - type netStatsPair struct { - // Where to write the output. - Out *uint64 - // The network stats file to read. - File string - } - // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
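As an aside on the counter swap the comment above describes: the deleted helper boils down to one sysfs read per counter. A minimal standalone sketch of that read (the interface name "eth0" in main is an assumed example, not from the original code):

    package main

    import (
        "fmt"
        "io/ioutil"
        "path/filepath"
        "strconv"
        "strings"
    )

    // readNetStat reads one counter from /sys/class/net/<iface>/statistics/<stat>.
    func readNetStat(iface, stat string) (uint64, error) {
        data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", iface, "statistics", stat))
        if err != nil {
            return 0, err
        }
        return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
    }

    func main() {
        // Per the comment above: the host veth's tx counter counts bytes the
        // container received, so "tx_bytes" is read into RxBytes.
        n, err := readNetStat("eth0", "tx_bytes")
        if err != nil {
            fmt.Println("read failed:", err)
            return
        }
        fmt.Printf("bytes received by the container: %d\n", n)
    }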
- netStats := []netStatsPair{ - {Out: &out.RxBytes, File: "tx_bytes"}, - {Out: &out.RxPackets, File: "tx_packets"}, - {Out: &out.RxErrors, File: "tx_errors"}, - {Out: &out.RxDropped, File: "tx_dropped"}, - - {Out: &out.TxBytes, File: "rx_bytes"}, - {Out: &out.TxPackets, File: "rx_packets"}, - {Out: &out.TxErrors, File: "rx_errors"}, - {Out: &out.TxDropped, File: "rx_dropped"}, - } - for _, netStat := range netStats { - data, err := readSysfsNetworkStats(interfaceName, netStat.File) - if err != nil { - return nil, err - } - *(netStat.Out) = data - } - return out, nil -} - -// Reads the specified statistics available under /sys/class/net//statistics -func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { - data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// loopback is a network strategy that provides a basic loopback device -type loopback struct { -} - -func (l *loopback) create(n *network, nspid int) error { - return nil -} - -func (l *loopback) initialize(config *network) error { - iface, err := net.InterfaceByName("lo") - if err != nil { - return err - } - return netlink.NetworkLinkUp(iface) -} - -func (l *loopback) attach(n *configs.Network) (err error) { - return nil -} - -func (l *loopback) detach(n *configs.Network) (err error) { - return nil -} - -// veth is a network strategy that uses a bridge and creates -// a veth pair, one that is attached to the bridge on the host and the other -// is placed inside the container's namespace -type veth struct { -} - -func (v *veth) detach(n *configs.Network) (err error) { - bridge, err := net.InterfaceByName(n.Bridge) - if err != nil { - return err - } - host, err := net.InterfaceByName(n.HostInterfaceName) - if err != nil { - return err - } - if err := netlink.DelFromBridge(host, bridge); err != nil { - return err - } - return nil -} - -// attach a container network interface to an external network -func (v *veth) attach(n *configs.Network) (err error) { - bridge, err := net.InterfaceByName(n.Bridge) - if err != nil { - return err - } - host, err := net.InterfaceByName(n.HostInterfaceName) - if err != nil { - return err - } - if err := netlink.AddToBridge(host, bridge); err != nil { - return err - } - if err := netlink.NetworkSetMTU(host, n.Mtu); err != nil { - return err - } - if n.HairpinMode { - if err := netlink.SetHairpinMode(host, true); err != nil { - return err - } - } - if err := netlink.NetworkLinkUp(host); err != nil { - return err - } - - return nil -} - -func (v *veth) create(n *network, nspid int) (err error) { - tmpName, err := v.generateTempPeerName() - if err != nil { - return err - } - n.TempVethPeerName = tmpName - defer func() { - if err != nil { - netlink.NetworkLinkDel(n.HostInterfaceName) - netlink.NetworkLinkDel(n.TempVethPeerName) - } - }() - if n.Bridge == "" { - return fmt.Errorf("bridge is not specified") - } - if err := netlink.NetworkCreateVethPair(n.HostInterfaceName, n.TempVethPeerName, n.TxQueueLen); err != nil { - return err - } - if err := v.attach(&n.Network); err != nil { - return err - } - child, err := net.InterfaceByName(n.TempVethPeerName) - if err != nil { - return err - } - return netlink.NetworkSetNsPid(child, nspid) -} - -func (v *veth) generateTempPeerName() (string, error) { - return utils.GenerateRandomName("veth", 7) -} - -func (v *veth) initialize(config *network) error { - peer := config.TempVethPeerName - if 
peer == "" { - return fmt.Errorf("peer is not specified") - } - child, err := net.InterfaceByName(peer) - if err != nil { - return err - } - if err := netlink.NetworkLinkDown(child); err != nil { - return err - } - if err := netlink.NetworkChangeName(child, config.Name); err != nil { - return err - } - // get the interface again after we changed the name as the index also changes. - if child, err = net.InterfaceByName(config.Name); err != nil { - return err - } - if config.MacAddress != "" { - if err := netlink.NetworkSetMacAddress(child, config.MacAddress); err != nil { - return err - } - } - ip, ipNet, err := net.ParseCIDR(config.Address) - if err != nil { - return err - } - if err := netlink.NetworkLinkAddIp(child, ip, ipNet); err != nil { - return err - } - if config.IPv6Address != "" { - if ip, ipNet, err = net.ParseCIDR(config.IPv6Address); err != nil { - return err - } - if err := netlink.NetworkLinkAddIp(child, ip, ipNet); err != nil { - return err - } - } - if err := netlink.NetworkSetMTU(child, config.Mtu); err != nil { - return err - } - if err := netlink.NetworkLinkUp(child); err != nil { - return err - } - if config.Gateway != "" { - if err := netlink.AddDefaultGw(config.Gateway, config.Name); err != nil { - return err - } - } - if config.IPv6Gateway != "" { - if err := netlink.AddDefaultGw(config.IPv6Gateway, config.Name); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/libcontainer/notify_linux.go b/vendor/github.com/docker/libcontainer/notify_linux.go deleted file mode 100644 index cf81e24d..00000000 --- a/vendor/github.com/docker/libcontainer/notify_linux.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" -) - -const oomCgroupName = "memory" - -// notifyOnOOM returns channel on which you can expect event about OOM, -// if process died without OOM this channel will be closed. -// s is current *libcontainer.State for container. -func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) { - dir := paths[oomCgroupName] - if dir == "" { - return nil, fmt.Errorf("There is no path for %q in state", oomCgroupName) - } - oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control")) - if err != nil { - return nil, err - } - fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) - if syserr != 0 { - oomControl.Close() - return nil, syserr - } - - eventfd := os.NewFile(fd, "eventfd") - - eventControlPath := filepath.Join(dir, "cgroup.event_control") - data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd()) - if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil { - eventfd.Close() - oomControl.Close() - return nil, err - } - ch := make(chan struct{}) - go func() { - defer func() { - close(ch) - eventfd.Close() - oomControl.Close() - }() - buf := make([]byte, 8) - for { - if _, err := eventfd.Read(buf); err != nil { - return - } - // When a cgroup is destroyed, an event is sent to eventfd. - // So if the control path is gone, return instead of notifying. 
- if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { - return - } - ch <- struct{}{} - } - }() - return ch, nil -} diff --git a/vendor/github.com/docker/libcontainer/notify_linux_test.go b/vendor/github.com/docker/libcontainer/notify_linux_test.go deleted file mode 100644 index 09bdf644..00000000 --- a/vendor/github.com/docker/libcontainer/notify_linux_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" - "time" -) - -func TestNotifyOnOOM(t *testing.T) { - memoryPath, err := ioutil.TempDir("", "testnotifyoom-") - if err != nil { - t.Fatal(err) - } - oomPath := filepath.Join(memoryPath, "memory.oom_control") - eventPath := filepath.Join(memoryPath, "cgroup.event_control") - if err := ioutil.WriteFile(oomPath, []byte{}, 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(eventPath, []byte{}, 0700); err != nil { - t.Fatal(err) - } - var eventFd, oomControlFd int - paths := map[string]string{ - "memory": memoryPath, - } - ooms, err := notifyOnOOM(paths) - if err != nil { - t.Fatal("expected no error, got:", err) - } - - data, err := ioutil.ReadFile(eventPath) - if err != nil { - t.Fatal("couldn't read event control file:", err) - } - - if _, err := fmt.Sscanf(string(data), "%d %d", &eventFd, &oomControlFd); err != nil { - t.Fatalf("invalid control data %q: %s", data, err) - } - - // re-open the eventfd - efd, err := syscall.Dup(eventFd) - if err != nil { - t.Fatal("unable to reopen eventfd:", err) - } - defer syscall.Close(efd) - - if err != nil { - t.Fatal("unable to dup event fd:", err) - } - - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, 1) - - if _, err := syscall.Write(efd, buf); err != nil { - t.Fatal("unable to write to eventfd:", err) - } - - select { - case <-ooms: - case <-time.After(100 * time.Millisecond): - t.Fatal("no notification on oom channel after 100ms") - } - - // simulate what happens when a cgroup is destroyed by cleaning up and then - // writing to the eventfd. - if err := os.RemoveAll(memoryPath); err != nil { - t.Fatal(err) - } - if _, err := syscall.Write(efd, buf); err != nil { - t.Fatal("unable to write to eventfd:", err) - } - - // give things a moment to shut down - select { - case _, ok := <-ooms: - if ok { - t.Fatal("expected no oom to be triggered") - } - case <-time.After(100 * time.Millisecond): - } - - if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF { - t.Error("expected oom control to be closed") - } - - if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF { - t.Error("expected event fd to be closed") - } -} diff --git a/vendor/github.com/docker/libcontainer/process.go b/vendor/github.com/docker/libcontainer/process.go deleted file mode 100644 index 7902d08c..00000000 --- a/vendor/github.com/docker/libcontainer/process.go +++ /dev/null @@ -1,89 +0,0 @@ -package libcontainer - -import ( - "fmt" - "io" - "math" - "os" -) - -type processOperations interface { - wait() (*os.ProcessState, error) - signal(sig os.Signal) error - pid() int -} - -// Process specifies the configuration and IO for a process inside -// a container. -type Process struct { - // The command to be run followed by any arguments. - Args []string - - // Env specifies the environment variables for the process. 
- Env []string - - // User will set the uid and gid of the executing process running inside the container - // local to the container's user and group configuration. - User string - - // Cwd will change the process's current working directory inside the container's rootfs. - Cwd string - - // Stdin is a pointer to a reader which provides the standard input stream. - Stdin io.Reader - - // Stdout is a pointer to a writer which receives the standard output stream. - Stdout io.Writer - - // Stderr is a pointer to a writer which receives the standard error stream. - Stderr io.Writer - - // ExtraFiles specifies additional open files to be inherited by the container - ExtraFiles []*os.File - - // consolePath is the path to the console allocated to the container. - consolePath string - - // Capabilities specify the capabilities to keep when executing the process inside the container - // All capabilities not specified will be dropped from the process's capability mask - Capabilities []string - - ops processOperations -} - -// Wait waits for the process to exit. -// Wait releases any resources associated with the Process -func (p Process) Wait() (*os.ProcessState, error) { - if p.ops == nil { - return nil, newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted) - } - return p.ops.wait() -} - -// Pid returns the process ID -func (p Process) Pid() (int, error) { - // math.MinInt32 is returned here, because it's an invalid value - // for the kill() system call. - if p.ops == nil { - return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted) - } - return p.ops.pid(), nil -} - -// Signal sends a signal to the Process. -func (p Process) Signal(sig os.Signal) error { - if p.ops == nil { - return newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted) - } - return p.ops.signal(sig) -} - -// NewConsole creates a new console for the process and returns it -func (p *Process) NewConsole(rootuid int) (Console, error) { - console, err := newConsole(rootuid, rootuid) - if err != nil { - return nil, err - } - p.consolePath = console.Path() - return console, nil -} diff --git a/vendor/github.com/docker/libcontainer/process_linux.go b/vendor/github.com/docker/libcontainer/process_linux.go deleted file mode 100644 index 4a448c22..00000000 --- a/vendor/github.com/docker/libcontainer/process_linux.go +++ /dev/null @@ -1,303 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "errors" - "io" - "os" - "os/exec" - "path/filepath" - "strconv" - "syscall" - - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/system" -) - -type parentProcess interface { - // pid returns the pid for the running process. - pid() int - - // start starts the process execution. - start() error - - // send a SIGKILL to the process and wait for the exit. - terminate() error - - // wait waits on the process returning the process state. - wait() (*os.ProcessState, error) - - // startTime returns the process start time.
- startTime() (string, error) - - signal(os.Signal) error - - externalDescriptors() []string - - setExternalDescriptors(fds []string) -} - -type setnsProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - cgroupPaths map[string]string - config *initConfig - fds []string -} - -func (p *setnsProcess) startTime() (string, error) { - return system.GetProcessStartTime(p.pid()) -} - -func (p *setnsProcess) signal(sig os.Signal) error { - s, ok := sig.(syscall.Signal) - if !ok { - return errors.New("os: unsupported signal type") - } - return syscall.Kill(p.cmd.Process.Pid, s) -} - -func (p *setnsProcess) start() (err error) { - defer p.parentPipe.Close() - if err = p.execSetns(); err != nil { - return newSystemError(err) - } - if len(p.cgroupPaths) > 0 { - if err := cgroups.EnterPid(p.cgroupPaths, p.cmd.Process.Pid); err != nil { - return newSystemError(err) - } - } - if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil { - return newSystemError(err) - } - if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil { - return newSystemError(err) - } - // wait for the child process to fully complete and receive an error message - // if one was encountered - var ierr *genericError - if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF { - return newSystemError(err) - } - if ierr != nil { - return newSystemError(ierr) - } - - return nil -} - -// execSetns runs the process that executes C code to perform the setns calls; -// because setns support requires the C process to fork off a child and perform the setns -// before the go runtime boots, we wait on the process to die and receive the child's pid -// over the provided pipe. -func (p *setnsProcess) execSetns() error { - err := p.cmd.Start() - p.childPipe.Close() - if err != nil { - return newSystemError(err) - } - status, err := p.cmd.Process.Wait() - if err != nil { - p.cmd.Wait() - return newSystemError(err) - } - if !status.Success() { - p.cmd.Wait() - return newSystemError(&exec.ExitError{ProcessState: status}) - } - var pid *pid - if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil { - p.cmd.Wait() - return newSystemError(err) - } - - process, err := os.FindProcess(pid.Pid) - if err != nil { - return err - } - - p.cmd.Process = process - return nil -} - -// terminate sends a SIGKILL to the forked process for the setns routine then waits to -// avoid the process becoming a zombie.
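The start/execSetns sequence above leans on a small handshake: the parent streams JSON over a socketpair, half-closes its end with SHUT_WR, then blocks decoding an error reply until EOF. A self-contained toy version of that pattern (the names and config type here are invented for illustration):

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "os"
        "syscall"
    )

    type toyConfig struct{ Args []string } // stand-in for initConfig

    func main() {
        fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
        if err != nil {
            panic(err)
        }
        parent := os.NewFile(uintptr(fds[0]), "parent")
        child := os.NewFile(uintptr(fds[1]), "child")

        done := make(chan struct{})
        go func() { // plays the container-init side
            defer close(done)
            var cfg toyConfig
            if err := json.NewDecoder(child).Decode(&cfg); err != nil {
                return
            }
            fmt.Println("child received:", cfg.Args)
            child.Close() // nothing to report; parent's decoder sees io.EOF
        }()

        if err := json.NewEncoder(parent).Encode(toyConfig{Args: []string{"/bin/sh"}}); err != nil {
            panic(err)
        }
        // Half-close: tells the child the config is complete while keeping
        // the read side open for an error reply, as in start() above.
        syscall.Shutdown(int(parent.Fd()), syscall.SHUT_WR)

        var reply map[string]string // stand-in for *genericError
        if err := json.NewDecoder(parent).Decode(&reply); err != nil && err != io.EOF {
            panic(err)
        }
        <-done
    }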
-func (p *setnsProcess) terminate() error { - if p.cmd.Process == nil { - return nil - } - err := p.cmd.Process.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *setnsProcess) wait() (*os.ProcessState, error) { - err := p.cmd.Wait() - if err != nil { - return p.cmd.ProcessState, err - } - - return p.cmd.ProcessState, nil -} - -func (p *setnsProcess) pid() int { - return p.cmd.Process.Pid -} - -func (p *setnsProcess) externalDescriptors() []string { - return p.fds -} - -func (p *setnsProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -type initProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - config *initConfig - manager cgroups.Manager - container *linuxContainer - fds []string -} - -func (p *initProcess) pid() int { - return p.cmd.Process.Pid -} - -func (p *initProcess) externalDescriptors() []string { - return p.fds -} - -func (p *initProcess) start() error { - defer p.parentPipe.Close() - err := p.cmd.Start() - p.childPipe.Close() - if err != nil { - return newSystemError(err) - } - // Save the standard descriptor names before the container process - // can potentially move them (e.g., via dup2()). If we don't do this now, - // we won't know at checkpoint time which file descriptor to look up. - fds, err := getPipeFds(p.pid()) - if err != nil { - return newSystemError(err) - } - p.setExternalDescriptors(fds) - - // Do this before syncing with child so that no children - // can escape the cgroup - if err := p.manager.Apply(p.pid()); err != nil { - return newSystemError(err) - } - defer func() { - if err != nil { - // TODO: should not be the responsibility to call here - p.manager.Destroy() - } - }() - if err := p.createNetworkInterfaces(); err != nil { - return newSystemError(err) - } - if err := p.sendConfig(); err != nil { - return newSystemError(err) - } - // wait for the child process to fully complete and receive an error message - // if one was encountered - var ierr *genericError - if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF { - return newSystemError(err) - } - if ierr != nil { - return newSystemError(ierr) - } - return nil -} - -func (p *initProcess) wait() (*os.ProcessState, error) { - err := p.cmd.Wait() - if err != nil { - return p.cmd.ProcessState, err - } - // we should kill all processes in cgroup when init dies if we use host PID namespace - if p.cmd.SysProcAttr.Cloneflags&syscall.CLONE_NEWPID == 0 { - killCgroupProcesses(p.manager) - } - return p.cmd.ProcessState, nil -} - -func (p *initProcess) terminate() error { - if p.cmd.Process == nil { - return nil - } - err := p.cmd.Process.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *initProcess) startTime() (string, error) { - return system.GetProcessStartTime(p.pid()) -} - -func (p *initProcess) sendConfig() error { - // send the state to the container's init process then shutdown writes for the parent - if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil { - return err - } - // shutdown writes for the parent side of the pipe - return syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR) -} - -func (p *initProcess) createNetworkInterfaces() error { - for _, config := range p.config.Config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - n := &network{ - Network: *config, - } - if err := strategy.create(n, p.pid()); err != nil { - return err - } - p.config.Networks =
append(p.config.Networks, n) - } - return nil -} - -func (p *initProcess) signal(sig os.Signal) error { - s, ok := sig.(syscall.Signal) - if !ok { - return errors.New("os: unsupported signal type") - } - return syscall.Kill(p.cmd.Process.Pid, s) -} - -func (p *initProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -func getPipeFds(pid int) ([]string, error) { - var fds []string - - fds = make([]string, 3) - - dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd") - for i := 0; i < 3; i++ { - f := filepath.Join(dirPath, strconv.Itoa(i)) - target, err := os.Readlink(f) - if err != nil { - return fds, err - } - fds[i] = target - } - return fds, nil -} diff --git a/vendor/github.com/docker/libcontainer/restored_process.go b/vendor/github.com/docker/libcontainer/restored_process.go deleted file mode 100644 index 978b1a1d..00000000 --- a/vendor/github.com/docker/libcontainer/restored_process.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - - "github.com/docker/libcontainer/system" -) - -func newRestoredProcess(pid int, fds []string) (*restoredProcess, error) { - var ( - err error - ) - proc, err := os.FindProcess(pid) - if err != nil { - return nil, err - } - started, err := system.GetProcessStartTime(pid) - if err != nil { - return nil, err - } - return &restoredProcess{ - proc: proc, - processStartTime: started, - fds: fds, - }, nil -} - -type restoredProcess struct { - proc *os.Process - processStartTime string - fds []string -} - -func (p *restoredProcess) start() error { - return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) -} - -func (p *restoredProcess) pid() int { - return p.proc.Pid -} - -func (p *restoredProcess) terminate() error { - err := p.proc.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *restoredProcess) wait() (*os.ProcessState, error) { - // TODO: how do we wait on the actual process? - // maybe use --exec-cmd in criu - st, err := p.proc.Wait() - if err != nil { - return nil, err - } - return st, nil -} - -func (p *restoredProcess) startTime() (string, error) { - return p.processStartTime, nil -} - -func (p *restoredProcess) signal(s os.Signal) error { - return p.proc.Signal(s) -} - -func (p *restoredProcess) externalDescriptors() []string { - return p.fds -} - -func (p *restoredProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -// nonChildProcess represents a process where the calling process is not -// the parent process. This process is created when a factory loads a container from -// a persisted state. 
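restoredProcess above records processStartTime so that a recycled PID is not mistaken for the original process. A rough sketch of how such a start time can be read, approximating what system.GetProcessStartTime does (simplified: it assumes the comm field in /proc/<pid>/stat contains no spaces):

    package main

    import (
        "fmt"
        "io/ioutil"
        "strings"
    )

    // processStartTime returns field 22 ("starttime") of /proc/<pid>/stat; it is
    // constant for the life of a process and differs if the PID was reused.
    func processStartTime(pid int) (string, error) {
        data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
        if err != nil {
            return "", err
        }
        fields := strings.Fields(string(data))
        if len(fields) < 22 {
            return "", fmt.Errorf("unexpected /proc/%d/stat format", pid)
        }
        return fields[21], nil
    }

    func main() {
        fmt.Println(processStartTime(1)) // e.g. start time of PID 1
    }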
-type nonChildProcess struct { - processPid int - processStartTime string - fds []string -} - -func (p *nonChildProcess) start() error { - return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) -} - -func (p *nonChildProcess) pid() int { - return p.processPid -} - -func (p *nonChildProcess) terminate() error { - return newGenericError(fmt.Errorf("restored process cannot be terminated"), SystemError) -} - -func (p *nonChildProcess) wait() (*os.ProcessState, error) { - return nil, newGenericError(fmt.Errorf("restored process cannot be waited on"), SystemError) -} - -func (p *nonChildProcess) startTime() (string, error) { - return p.processStartTime, nil -} - -func (p *nonChildProcess) signal(s os.Signal) error { - return newGenericError(fmt.Errorf("restored process cannot be signaled"), SystemError) -} - -func (p *nonChildProcess) externalDescriptors() []string { - return p.fds -} - -func (p *nonChildProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} diff --git a/vendor/github.com/docker/libcontainer/rootfs_linux.go b/vendor/github.com/docker/libcontainer/rootfs_linux.go deleted file mode 100644 index 0b0c3815..00000000 --- a/vendor/github.com/docker/libcontainer/rootfs_linux.go +++ /dev/null @@ -1,462 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/symlink" - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/configs" - "github.com/docker/libcontainer/label" -) - -const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV - -// setupRootfs sets up the devices, mount points, and filesystems for use inside a -// new mount namespace. -func setupRootfs(config *configs.Config, console *linuxConsole) (err error) { - if err := prepareRoot(config); err != nil { - return newSystemError(err) - } - for _, m := range config.Mounts { - for _, precmd := range m.PremountCmds { - if err := mountCmd(precmd); err != nil { - return newSystemError(err) - } - } - if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil { - return newSystemError(err) - } - - for _, postcmd := range m.PostmountCmds { - if err := mountCmd(postcmd); err != nil { - return newSystemError(err) - } - } - } - if err := createDevices(config); err != nil { - return newSystemError(err) - } - if err := setupPtmx(config, console); err != nil { - return newSystemError(err) - } - if err := setupDevSymlinks(config.Rootfs); err != nil { - return newSystemError(err) - } - if err := syscall.Chdir(config.Rootfs); err != nil { - return newSystemError(err) - } - if config.NoPivotRoot { - err = msMoveRoot(config.Rootfs) - } else { - err = pivotRoot(config.Rootfs, config.PivotDir) - } - if err != nil { - return newSystemError(err) - } - if err := reOpenDevNull(config.Rootfs); err != nil { - return newSystemError(err) - } - if config.Readonlyfs { - if err := setReadonly(); err != nil { - return newSystemError(err) - } - } - syscall.Umask(0022) - return nil -} - -func mountCmd(cmd configs.Command) error { - - command := exec.Command(cmd.Path, cmd.Args[:]...) 
- command.Env = cmd.Env - command.Dir = cmd.Dir - if out, err := command.CombinedOutput(); err != nil { - return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err) - } - - return nil -} - -func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { - var ( - dest = m.Destination - data = label.FormatMountLabel(m.Data, mountLabel) - ) - if !strings.HasPrefix(dest, rootfs) { - dest = filepath.Join(rootfs, dest) - } - - switch m.Device { - case "proc", "sysfs": - if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) { - return err - } - return syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), "") - case "mqueue": - if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) { - return err - } - if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), ""); err != nil { - return err - } - return label.SetFileLabel(dest, mountLabel) - case "tmpfs": - stat, err := os.Stat(dest) - if err != nil { - if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) { - return err - } - } - if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), data); err != nil { - return err - } - if stat != nil { - if err = os.Chmod(dest, stat.Mode()); err != nil { - return err - } - } - return nil - case "devpts": - if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) { - return err - } - return syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), data) - case "bind": - stat, err := os.Stat(m.Source) - if err != nil { - // error out if the source of a bind mount does not exist as we will be - // unable to bind anything to it. - return err - } - // ensure that the destination of the bind mount is resolved of symlinks at mount time because - // any previous mounts can invalidate the next mount's destination. - // this can happen when a user specifies mounts within other mounts to cause breakouts or other - // evil stuff to try to escape the container's rootfs. 
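To make the symlink hazard in the comment above concrete: naively joining the destination onto the rootfs is unsafe if any path component is a symlink pointing outside it. A simplified guard in the same spirit (filepath.EvalSymlinks is a rough stand-in for the scoped resolution symlink.FollowSymlinkInScope performs in the code that follows; unlike it, EvalSymlinks requires the path to exist):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    // safeJoin resolves dest inside rootfs and rejects the result if the
    // resolved path escapes the rootfs prefix.
    func safeJoin(rootfs, dest string) (string, error) {
        resolved, err := filepath.EvalSymlinks(filepath.Join(rootfs, dest))
        if err != nil {
            return "", err
        }
        root := filepath.Clean(rootfs)
        if resolved != root && !strings.HasPrefix(resolved, root+string(os.PathSeparator)) {
            return "", fmt.Errorf("%q resolves outside rootfs %q", dest, rootfs)
        }
        return resolved, nil
    }

    func main() {
        fmt.Println(safeJoin("/tmp/rootfs", "etc/passwd")) // assumed example paths
    }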
- if dest, err = symlink.FollowSymlinkInScope(filepath.Join(rootfs, m.Destination), rootfs); err != nil { - return err - } - if err := checkMountDestination(rootfs, dest); err != nil { - return err - } - if err := createIfNotExists(dest, stat.IsDir()); err != nil { - return err - } - if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), data); err != nil { - return err - } - if m.Flags&syscall.MS_RDONLY != 0 { - if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags|syscall.MS_REMOUNT), ""); err != nil { - return err - } - } - if m.Relabel != "" { - if err := label.Relabel(m.Source, mountLabel, m.Relabel); err != nil { - return err - } - } - if m.Flags&syscall.MS_PRIVATE != 0 { - if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil { - return err - } - } - case "cgroup": - mounts, err := cgroups.GetCgroupMounts() - if err != nil { - return err - } - var binds []*configs.Mount - for _, mm := range mounts { - dir, err := mm.GetThisCgroupDir() - if err != nil { - return err - } - binds = append(binds, &configs.Mount{ - Device: "bind", - Source: filepath.Join(mm.Mountpoint, dir), - Destination: filepath.Join(m.Destination, strings.Join(mm.Subsystems, ",")), - Flags: syscall.MS_BIND | syscall.MS_REC | syscall.MS_RDONLY, - }) - } - tmpfs := &configs.Mount{ - Device: "tmpfs", - Destination: m.Destination, - Flags: syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV, - } - if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil { - return err - } - for _, b := range binds { - if err := mountToRootfs(b, rootfs, mountLabel); err != nil { - return err - } - } - default: - return fmt.Errorf("unknown mount device %q to %q", m.Device, m.Destination) - } - return nil -} - -// checkMountDestination checks to ensure that the mount destination is not over the -// top of /proc or /sys. -// dest is required to be an abs path and have any symlinks resolved before calling this function. -func checkMountDestination(rootfs, dest string) error { - if filepath.Clean(rootfs) == filepath.Clean(dest) { - return fmt.Errorf("mounting into / is prohibited") - } - invalidDestinations := []string{ - "/proc", - } - for _, invalid := range invalidDestinations { - path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest) - if err != nil { - return err - } - if path == "." || !strings.HasPrefix(path, "..") { - return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid) - } - } - return nil -} - -func setupDevSymlinks(rootfs string) error { - var links = [][2]string{ - {"/proc/self/fd", "/dev/fd"}, - {"/proc/self/fd/0", "/dev/stdin"}, - {"/proc/self/fd/1", "/dev/stdout"}, - {"/proc/self/fd/2", "/dev/stderr"}, - } - // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink - // in /dev if it exists in /proc. - if _, err := os.Stat("/proc/kcore"); err == nil { - links = append(links, [2]string{"/proc/kcore", "/dev/kcore"}) - } - for _, link := range links { - var ( - src = link[0] - dst = filepath.Join(rootfs, link[1]) - ) - if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { - return fmt.Errorf("symlink %s %s %s", src, dst, err) - } - } - return nil -} - -// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs -// this method will make them point to `/dev/null` in this container's rootfs. This -// needs to be called after we chroot/pivot into the container's rootfs so that any -// symlinks are resolved locally. 
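The filepath.Rel test in checkMountDestination above is easiest to see with concrete inputs: a destination lies inside a forbidden prefix exactly when the relative path is "." or does not begin with "..". A small probe (the paths mirror the test cases that appear later in this diff):

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    func main() {
        for _, dest := range []string{
            "/rootfs/proc/sys",  // inside /proc: rejected
            "/rootfs/procfiles", // shares only the prefix string: allowed
            "/rootfs/sys/fs",    // unrelated: allowed
        } {
            rel, _ := filepath.Rel("/rootfs/proc", dest)
            inside := rel == "." || !strings.HasPrefix(rel, "..")
            fmt.Printf("%-20s rel=%-15s inside=%v\n", dest, rel, inside)
        }
    }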
-func reOpenDevNull(rootfs string) error { - var stat, devNullStat syscall.Stat_t - file, err := os.Open("/dev/null") - if err != nil { - return fmt.Errorf("Failed to open /dev/null - %s", err) - } - defer file.Close() - if err := syscall.Fstat(int(file.Fd()), &devNullStat); err != nil { - return err - } - for fd := 0; fd < 3; fd++ { - if err := syscall.Fstat(fd, &stat); err != nil { - return err - } - if stat.Rdev == devNullStat.Rdev { - // Close and re-open the fd. - if err := syscall.Dup3(int(file.Fd()), fd, 0); err != nil { - return err - } - } - } - return nil -} - -// Create the device nodes in the container. -func createDevices(config *configs.Config) error { - oldMask := syscall.Umask(0000) - for _, node := range config.Devices { - // containers running in a user namespace are not allowed to mknod - // devices so we can just bind mount it from the host. - if err := createDeviceNode(config.Rootfs, node, config.Namespaces.Contains(configs.NEWUSER)); err != nil { - syscall.Umask(oldMask) - return err - } - } - syscall.Umask(oldMask) - return nil -} - -// Creates the device node in the rootfs of the container. -func createDeviceNode(rootfs string, node *configs.Device, bind bool) error { - dest := filepath.Join(rootfs, node.Path) - if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil { - return err - } - - if bind { - f, err := os.Create(dest) - if err != nil && !os.IsExist(err) { - return err - } - if f != nil { - f.Close() - } - return syscall.Mount(node.Path, dest, "bind", syscall.MS_BIND, "") - } - if err := mknodDevice(dest, node); err != nil { - if os.IsExist(err) { - return nil - } - return err - } - return nil -} - -func mknodDevice(dest string, node *configs.Device) error { - fileMode := node.FileMode - switch node.Type { - case 'c': - fileMode |= syscall.S_IFCHR - case 'b': - fileMode |= syscall.S_IFBLK - default: - return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) - } - if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil { - return err - } - return syscall.Chown(dest, int(node.Uid), int(node.Gid)) -} - -func prepareRoot(config *configs.Config) error { - flag := syscall.MS_SLAVE | syscall.MS_REC - if config.Privatefs { - flag = syscall.MS_PRIVATE | syscall.MS_REC - } - if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil { - return err - } - return syscall.Mount(config.Rootfs, config.Rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, "") -} - -func setReadonly() error { - return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") -} - -func setupPtmx(config *configs.Config, console *linuxConsole) error { - ptmx := filepath.Join(config.Rootfs, "dev/ptmx") - if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { - return err - } - if err := os.Symlink("pts/ptmx", ptmx); err != nil { - return fmt.Errorf("symlink dev ptmx %s", err) - } - if console != nil { - return console.mount(config.Rootfs, config.MountLabel, 0, 0) - } - return nil -} - -func pivotRoot(rootfs, pivotBaseDir string) error { - if pivotBaseDir == "" { - pivotBaseDir = "/" - } - tmpDir := filepath.Join(rootfs, pivotBaseDir) - if err := os.MkdirAll(tmpDir, 0755); err != nil { - return fmt.Errorf("can't create tmp dir %s, error %v", tmpDir, err) - } - pivotDir, err := ioutil.TempDir(tmpDir, ".pivot_root") - if err != nil { - return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err) - } - if err := syscall.PivotRoot(rootfs, pivotDir); err != nil { - return 
fmt.Errorf("pivot_root %s", err) - } - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("chdir / %s", err) - } - // path to pivot dir now changed, update - pivotDir = filepath.Join(pivotBaseDir, filepath.Base(pivotDir)) - if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { - return fmt.Errorf("unmount pivot_root dir %s", err) - } - return os.Remove(pivotDir) -} - -func msMoveRoot(rootfs string) error { - if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { - return err - } - if err := syscall.Chroot("."); err != nil { - return err - } - return syscall.Chdir("/") -} - -// createIfNotExists creates a file or a directory only if it does not already exist. -func createIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} - -// remountReadonly will bind over the top of an existing path and ensure that it is read-only. -func remountReadonly(path string) error { - for i := 0; i < 5; i++ { - if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) { - switch err { - case syscall.EINVAL: - // Probably not a mountpoint, use bind-mount - if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil { - return err - } - return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "") - case syscall.EBUSY: - time.Sleep(100 * time.Millisecond) - continue - default: - return err - } - } - return nil - } - return fmt.Errorf("unable to mount %s as readonly max retries reached", path) -} - -// maskFile bind mounts /dev/null over the top of the specified path inside a container -// to avoid security issues from processes reading information from non-namespace aware mounts ( proc/kcore ). -func maskFile(path string) error { - if err := syscall.Mount("/dev/null", path, "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// writeSystemProperty writes the value to a path under /proc/sys as determined from the key. -// For e.g. net.ipv4.ip_forward translated to /proc/sys/net/ipv4/ip_forward. 
-func writeSystemProperty(key, value string) error { - keyPath := strings.Replace(key, ".", "/", -1) - return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644) -} diff --git a/vendor/github.com/docker/libcontainer/rootfs_linux_test.go b/vendor/github.com/docker/libcontainer/rootfs_linux_test.go deleted file mode 100644 index a3bb0770..00000000 --- a/vendor/github.com/docker/libcontainer/rootfs_linux_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build linux - -package libcontainer - -import "testing" - -func TestCheckMountDestOnProc(t *testing.T) { - dest := "/rootfs/proc/" - err := checkMountDestination("/rootfs", dest) - if err == nil { - t.Fatal("destination inside proc should return an error") - } -} - -func TestCheckMountDestInSys(t *testing.T) { - dest := "/rootfs//sys/fs/cgroup" - err := checkMountDestination("/rootfs", dest) - if err != nil { - t.Fatal("destination inside /sys should not return an error") - } -} - -func TestCheckMountDestFalsePositive(t *testing.T) { - dest := "/rootfs/sysfiles/fs/cgroup" - err := checkMountDestination("/rootfs", dest) - if err != nil { - t.Fatal(err) - } -} - -func TestCheckMountRoot(t *testing.T) { - dest := "/rootfs" - err := checkMountDestination("/rootfs", dest) - if err == nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/libcontainer/setns_init_linux.go b/vendor/github.com/docker/libcontainer/setns_init_linux.go deleted file mode 100644 index f77219d2..00000000 --- a/vendor/github.com/docker/libcontainer/setns_init_linux.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "os" - - "github.com/docker/libcontainer/apparmor" - "github.com/docker/libcontainer/label" - "github.com/docker/libcontainer/system" -) - -// linuxSetnsInit performs the container's initialization for running a new process -// inside an existing container. 
-type linuxSetnsInit struct { - config *initConfig -} - -func (l *linuxSetnsInit) Init() error { - if err := setupRlimits(l.config.Config); err != nil { - return err - } - if err := finalizeNamespace(l.config); err != nil { - return err - } - if err := apparmor.ApplyProfile(l.config.Config.AppArmorProfile); err != nil { - return err - } - if l.config.Config.ProcessLabel != "" { - if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil { - return err - } - } - return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) -} diff --git a/vendor/github.com/docker/libcontainer/standard_init_linux.go b/vendor/github.com/docker/libcontainer/standard_init_linux.go deleted file mode 100644 index 445c1fa2..00000000 --- a/vendor/github.com/docker/libcontainer/standard_init_linux.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "os" - "syscall" - - "github.com/docker/libcontainer/apparmor" - "github.com/docker/libcontainer/configs" - "github.com/docker/libcontainer/label" - "github.com/docker/libcontainer/system" -) - -type linuxStandardInit struct { - parentPid int - config *initConfig -} - -func (l *linuxStandardInit) Init() error { - // join any namespaces via a path to the namespace fd if provided - if err := joinExistingNamespaces(l.config.Config.Namespaces); err != nil { - return err - } - var console *linuxConsole - if l.config.Console != "" { - console = newConsoleFromPath(l.config.Console) - if err := console.dupStdio(); err != nil { - return err - } - } - if _, err := syscall.Setsid(); err != nil { - return err - } - if console != nil { - if err := system.Setctty(); err != nil { - return err - } - } - if err := setupNetwork(l.config); err != nil { - return err - } - if err := setupRoute(l.config.Config); err != nil { - return err - } - if err := setupRlimits(l.config.Config); err != nil { - return err - } - label.Init() - // InitializeMountNamespace() can be executed only for a new mount namespace - if l.config.Config.Namespaces.Contains(configs.NEWNS) { - if err := setupRootfs(l.config.Config, console); err != nil { - return err - } - } - if hostname := l.config.Config.Hostname; hostname != "" { - if err := syscall.Sethostname([]byte(hostname)); err != nil { - return err - } - } - if err := apparmor.ApplyProfile(l.config.Config.AppArmorProfile); err != nil { - return err - } - if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil { - return err - } - - for key, value := range l.config.Config.SystemProperties { - if err := writeSystemProperty(key, value); err != nil { - return err - } - } - - for _, path := range l.config.Config.ReadonlyPaths { - if err := remountReadonly(path); err != nil { - return err - } - } - for _, path := range l.config.Config.MaskPaths { - if err := maskFile(path); err != nil { - return err - } - } - pdeath, err := system.GetParentDeathSignal() - if err != nil { - return err - } - if err := finalizeNamespace(l.config); err != nil { - return err - } - // finalizeNamespace can change user/group which clears the parent death - // signal, so we restore it here. - if err := pdeath.Restore(); err != nil { - return err - } - // compare the parent from the initial start of the init process and make sure that it did not change. - // if the parent changes that means it died and we were reparented to something else so we should - // just kill ourselves and not cause problems for someone else.
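The ppid re-check that follows this comment closes a classic race around the parent-death signal: if the parent dies before PR_SET_PDEATHSIG is registered, the signal is never delivered. The usual pattern, sketched with a raw prctl (a fragment; expectedParent is a placeholder for the pid recorded at startup, and system.GetParentDeathSignal above wraps the same machinery):

    // Fragment of a hypothetical init step mirroring linuxStandardInit above.
    func guardAgainstReparenting(expectedParent int) error {
        // Ask the kernel to SIGKILL this process when its parent dies.
        if _, _, errno := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGKILL), 0); errno != 0 {
            return errno
        }
        // The parent may have died before the prctl took effect, so re-check
        // who the parent is now; if we were reparented, kill ourselves.
        if syscall.Getppid() != expectedParent {
            return syscall.Kill(syscall.Getpid(), syscall.SIGKILL)
        }
        return nil
    }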
- if syscall.Getppid() != l.parentPid { - return syscall.Kill(syscall.Getpid(), syscall.SIGKILL) - } - if err := finalizeSeccomp(l.config); err != nil { - return err - } - return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) -} diff --git a/vendor/github.com/docker/libcontainer/stats.go b/vendor/github.com/docker/libcontainer/stats.go deleted file mode 100644 index 303e4b94..00000000 --- a/vendor/github.com/docker/libcontainer/stats.go +++ /dev/null @@ -1,15 +0,0 @@ -package libcontainer - -type NetworkInterface struct { - // Name is the name of the network interface. - Name string - - RxBytes uint64 - RxPackets uint64 - RxErrors uint64 - RxDropped uint64 - TxBytes uint64 - TxPackets uint64 - TxErrors uint64 - TxDropped uint64 -} diff --git a/vendor/github.com/docker/libcontainer/stats_freebsd.go b/vendor/github.com/docker/libcontainer/stats_freebsd.go deleted file mode 100644 index f8d1d689..00000000 --- a/vendor/github.com/docker/libcontainer/stats_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/docker/libcontainer/stats_linux.go b/vendor/github.com/docker/libcontainer/stats_linux.go deleted file mode 100644 index 8ce96978..00000000 --- a/vendor/github.com/docker/libcontainer/stats_linux.go +++ /dev/null @@ -1,8 +0,0 @@ -package libcontainer - -import "github.com/docker/libcontainer/cgroups" - -type Stats struct { - Interfaces []*NetworkInterface - CgroupStats *cgroups.Stats -} diff --git a/vendor/github.com/docker/libcontainer/stats_windows.go b/vendor/github.com/docker/libcontainer/stats_windows.go deleted file mode 100644 index f8d1d689..00000000 --- a/vendor/github.com/docker/libcontainer/stats_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/docker/libcontainer/update-vendor.sh b/vendor/github.com/docker/libcontainer/update-vendor.sh deleted file mode 100755 index 69b6e50c..00000000 --- a/vendor/github.com/docker/libcontainer/update-vendor.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$BASH_SOURCE")" - -# Downloads dependencies into vendor/ directory -mkdir -p vendor -cd vendor - -clone() { - vcs=$1 - pkg=$2 - rev=$3 - - pkg_url=https://$pkg - target_dir=src/$pkg - - echo -n "$pkg @ $rev: " - - if [ -d $target_dir ]; then - echo -n 'rm old, ' - rm -fr $target_dir - fi - - echo -n 'clone, ' - case $vcs in - git) - git clone --quiet --no-checkout $pkg_url $target_dir - ( cd $target_dir && git reset --quiet --hard $rev ) - ;; - hg) - hg clone --quiet --updaterev $rev $pkg_url $target_dir - ;; - esac - - echo -n 'rm VCS, ' - ( cd $target_dir && rm -rf .{git,hg} ) - - echo done -} - -# the following lines are in sorted order, FYI -clone git github.com/codegangsta/cli 1.1.0 -clone git github.com/coreos/go-systemd v2 -clone git github.com/godbus/dbus v2 -clone git github.com/Sirupsen/logrus v0.7.3 -clone git github.com/syndtr/gocapability 66ef2aa -clone git github.com/golang/protobuf 655cdfa588ea - -# intentionally not vendoring Docker itself... 
that'd be a circle :) diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go deleted file mode 100644 index 7efc4093..00000000 --- a/vendor/github.com/docker/libnetwork/controller.go +++ /dev/null @@ -1,732 +0,0 @@ -/* -Package libnetwork provides the basic functionality and extension points to -create network namespaces and allocate interfaces for containers to use. - - networkType := "bridge" - - // Create a new controller instance - driverOptions := options.Generic{} - genericOption := make(map[string]interface{}) - genericOption[netlabel.GenericData] = driverOptions - controller, err := libnetwork.New(config.OptionDriverConfig(networkType, genericOption)) - if err != nil { - return - } - - // Create a network for containers to join. - // NewNetwork accepts variadic optional arguments that libnetwork and Drivers can make use of - network, err := controller.NewNetwork(networkType, "network1") - if err != nil { - return - } - - // For each new container: allocate IP and interfaces. The returned network - // settings will be used for container infos (inspect and such), as well as - // iptables rules for port publishing. This info is contained or accessible - // from the returned endpoint. - ep, err := network.CreateEndpoint("Endpoint1") - if err != nil { - return - } - - // Create the sandbox for the container. - // NewSandbox accepts variadic optional arguments which libnetwork can use. - sbx, err := controller.NewSandbox("container1", - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io")) - - // A sandbox can join the endpoint via the join api. - err = ep.Join(sbx) - if err != nil { - return - } -*/ -package libnetwork - -import ( - "container/heap" - "fmt" - "net" - "strings" - "sync" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/discovery" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/driverapi" - "github.com/docker/libnetwork/hostdiscovery" - "github.com/docker/libnetwork/ipamapi" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/osl" - "github.com/docker/libnetwork/types" -) - -// NetworkController provides the interface for a controller instance which manages -// networks. -type NetworkController interface { - // ID provides a unique identity for the controller - ID() string - - // Config method returns the bootup configuration for the controller - Config() config.Config - - // Create a new network. The options parameter carries network specific options. - NewNetwork(networkType, name string, options ...NetworkOption) (Network, error) - - // Networks returns the list of Network(s) managed by this controller. - Networks() []Network - - // WalkNetworks uses the provided function to walk the Network(s) managed by this controller. - WalkNetworks(walker NetworkWalker) - - // NetworkByName returns the Network which has the passed name. If not found, the error ErrNoSuchNetwork is returned. - NetworkByName(name string) (Network, error) - - // NetworkByID returns the Network which has the passed id. If not found, the error ErrNoSuchNetwork is returned.
- NetworkByID(id string) (Network, error) - - // NewSandbox creates a new network sandbox for the passed container id - NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error) - - // Sandboxes returns the list of Sandbox(s) managed by this controller. - Sandboxes() []Sandbox - - // WalkSandboxes uses the provided function to walk the Sandbox(s) managed by this controller. - WalkSandboxes(walker SandboxWalker) - - // SandboxByID returns the Sandbox which has the passed id. If not found, a types.NotFoundError is returned. - SandboxByID(id string) (Sandbox, error) - - // SandboxDestroy destroys a sandbox given a container ID - SandboxDestroy(id string) error - - // Stop network controller - Stop() -} - -// NetworkWalker is a client provided function which will be used to walk the Networks. -// When the function returns true, the walk will stop. -type NetworkWalker func(nw Network) bool - -// SandboxWalker is a client provided function which will be used to walk the Sandboxes. -// When the function returns true, the walk will stop. -type SandboxWalker func(sb Sandbox) bool - -type driverData struct { - driver driverapi.Driver - capability driverapi.Capability -} - -type ipamData struct { - driver ipamapi.Ipam - capability *ipamapi.Capability - // default address spaces are provided by ipam driver at registration time - defaultLocalAddressSpace, defaultGlobalAddressSpace string -} - -type driverTable map[string]*driverData - -type ipamTable map[string]*ipamData -type sandboxTable map[string]*sandbox - -type controller struct { - id string - drivers driverTable - ipamDrivers ipamTable - sandboxes sandboxTable - cfg *config.Config - stores []datastore.DataStore - discovery hostdiscovery.HostDiscovery - extKeyListener net.Listener - watchCh chan *endpoint - unWatchCh chan *endpoint - svcDb map[string]svcInfo - nmap map[string]*netWatch - defOsSbox osl.Sandbox - sboxOnce sync.Once - sync.Mutex -} - -// New creates a new instance of network controller. -func New(cfgOptions ...config.Option) (NetworkController, error) { - var cfg *config.Config - cfg = &config.Config{ - Daemon: config.DaemonCfg{ - DriverCfg: make(map[string]interface{}), - }, - Scopes: make(map[string]*datastore.ScopeCfg), - } - - if len(cfgOptions) > 0 { - cfg.ProcessOptions(cfgOptions...) - } - cfg.LoadDefaultScopes(cfg.Daemon.DataDir) - - c := &controller{ - id: stringid.GenerateRandomID(), - cfg: cfg, - sandboxes: sandboxTable{}, - drivers: driverTable{}, - ipamDrivers: ipamTable{}, - svcDb: make(map[string]svcInfo), - } - - if err := c.initStores(); err != nil { - return nil, err - } - - if cfg != nil && cfg.Cluster.Watcher != nil { - if err := c.initDiscovery(cfg.Cluster.Watcher); err != nil { - // Failing to initialize discovery is a bad situation to be in.
- // But it must not prevent the controller from being created - log.Errorf("Failed to initialize discovery: %v", err) - } - } - - if err := initDrivers(c); err != nil { - return nil, err - } - - if err := initIpams(c, c.getStore(datastore.LocalScope), - c.getStore(datastore.GlobalScope)); err != nil { - return nil, err - } - - c.sandboxCleanup() - c.cleanupLocalEndpoints() - - if err := c.startExternalKeyListener(); err != nil { - return nil, err - } - - return c, nil -} - -func (c *controller) ID() string { - return c.id -} - -func (c *controller) validateHostDiscoveryConfig() bool { - if c.cfg == nil || c.cfg.Cluster.Discovery == "" || c.cfg.Cluster.Address == "" { - return false - } - return true -} - -func (c *controller) clusterHostID() string { - c.Lock() - defer c.Unlock() - if c.cfg == nil || c.cfg.Cluster.Address == "" { - return "" - } - addr := strings.Split(c.cfg.Cluster.Address, ":") - return addr[0] -} - -func (c *controller) isNodeAlive(node string) bool { - if c.discovery == nil { - return false - } - - nodes := c.discovery.Fetch() - for _, n := range nodes { - if n.String() == node { - return true - } - } - - return false -} - -func (c *controller) initDiscovery(watcher discovery.Watcher) error { - if c.cfg == nil { - return fmt.Errorf("discovery initialization requires a valid configuration") - } - - c.discovery = hostdiscovery.NewHostDiscovery(watcher) - return c.discovery.Watch(c.activeCallback, c.hostJoinCallback, c.hostLeaveCallback) -} - -func (c *controller) activeCallback() { - ds := c.getStore(datastore.GlobalScope) - if ds != nil && !ds.Active() { - ds.RestartWatch() - } -} - -func (c *controller) hostJoinCallback(nodes []net.IP) { - c.processNodeDiscovery(nodes, true) -} - -func (c *controller) hostLeaveCallback(nodes []net.IP) { - c.processNodeDiscovery(nodes, false) -} - -func (c *controller) processNodeDiscovery(nodes []net.IP, add bool) { - c.Lock() - drivers := []*driverData{} - for _, d := range c.drivers { - drivers = append(drivers, d) - } - c.Unlock() - - for _, d := range drivers { - c.pushNodeDiscovery(d, nodes, add) - } -} - -func (c *controller) pushNodeDiscovery(d *driverData, nodes []net.IP, add bool) { - var self net.IP - if c.cfg != nil { - addr := strings.Split(c.cfg.Cluster.Address, ":") - self = net.ParseIP(addr[0]) - } - if d == nil || d.capability.DataScope != datastore.GlobalScope || nodes == nil { - return - } - for _, node := range nodes { - nodeData := driverapi.NodeDiscoveryData{Address: node.String(), Self: node.Equal(self)} - var err error - if add { - err = d.driver.DiscoverNew(driverapi.NodeDiscovery, nodeData) - } else { - err = d.driver.DiscoverDelete(driverapi.NodeDiscovery, nodeData) - } - if err != nil { - log.Debugf("discovery notification error: %v", err) - } - } -} - -func (c *controller) Config() config.Config { - c.Lock() - defer c.Unlock() - if c.cfg == nil { - return config.Config{} - } - return *c.cfg -} - -func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver, capability driverapi.Capability) error { - if !config.IsValidName(networkType) { - return ErrInvalidName(networkType) - } - - c.Lock() - if _, ok := c.drivers[networkType]; ok { - c.Unlock() - return driverapi.ErrActiveRegistration(networkType) - } - dData := &driverData{driver, capability} - c.drivers[networkType] = dData - hd := c.discovery - c.Unlock() - - if hd != nil { - c.pushNodeDiscovery(dData, hd.Fetch(), true) - } - - return nil -} - -func (c *controller) registerIpamDriver(name string, driver ipamapi.Ipam, caps *ipamapi.Capability) error
{ - if !config.IsValidName(name) { - return ErrInvalidName(name) - } - - c.Lock() - _, ok := c.ipamDrivers[name] - c.Unlock() - if ok { - return types.ForbiddenErrorf("ipam driver %q already registered", name) - } - locAS, glbAS, err := driver.GetDefaultAddressSpaces() - if err != nil { - return types.InternalErrorf("ipam driver %q failed to return default address spaces: %v", name, err) - } - c.Lock() - c.ipamDrivers[name] = &ipamData{driver: driver, defaultLocalAddressSpace: locAS, defaultGlobalAddressSpace: glbAS, capability: caps} - c.Unlock() - - log.Debugf("Registering ipam driver: %q", name) - - return nil -} - -func (c *controller) RegisterIpamDriver(name string, driver ipamapi.Ipam) error { - return c.registerIpamDriver(name, driver, &ipamapi.Capability{}) -} - -func (c *controller) RegisterIpamDriverWithCapabilities(name string, driver ipamapi.Ipam, caps *ipamapi.Capability) error { - return c.registerIpamDriver(name, driver, caps) -} - -// NewNetwork creates a new network of the specified network type. The options -// are network-specific and modeled in a generic way. -func (c *controller) NewNetwork(networkType, name string, options ...NetworkOption) (Network, error) { - if !config.IsValidName(name) { - return nil, ErrInvalidName(name) - } - - // Construct the network object - network := &network{ - name: name, - networkType: networkType, - generic: map[string]interface{}{netlabel.GenericData: make(map[string]string)}, - ipamType: ipamapi.DefaultIPAM, - id: stringid.GenerateRandomID(), - ctrlr: c, - persist: true, - drvOnce: &sync.Once{}, - } - - network.processOptions(options...) - - // Make sure we have a driver available for this network type - // before we allocate anything. - if _, err := network.driver(); err != nil { - return nil, err - } - - err := network.ipamAllocate() - if err != nil { - return nil, err - } - defer func() { - if err != nil { - network.ipamRelease() - } - }() - - if err = c.addNetwork(network); err != nil { - return nil, err - } - defer func() { - if err != nil { - if e := network.deleteNetwork(); e != nil { - log.Warnf("couldn't roll back driver network on network %s creation failure: %v", network.name, err) - } - } - }() - - if err = c.updateToStore(network); err != nil { - return nil, err - } - defer func() { - if err != nil { - if e := c.deleteFromStore(network); e != nil { - log.Warnf("couldn't roll back from store, network %s on failure (%v): %v", network.name, err, e) - } - } - }() - - network.epCnt = &endpointCnt{n: network} - if err = c.updateToStore(network.epCnt); err != nil { - return nil, err - } - - return network, nil -} - -func (c *controller) addNetwork(n *network) error { - d, err := n.driver() - if err != nil { - return err - } - - // Create the network - if err := d.CreateNetwork(n.id, n.generic, n.getIPData(4), n.getIPData(6)); err != nil { - return err - } - - return nil -} - -func (c *controller) Networks() []Network { - var list []Network - - networks, err := c.getNetworksFromStore() - if err != nil { - log.Error(err) - } - - for _, n := range networks { - list = append(list, n) - } - - return list -} - -func (c *controller) WalkNetworks(walker NetworkWalker) { - for _, n := range c.Networks() { - if walker(n) { - return - } - } -} - -func (c *controller) NetworkByName(name string) (Network, error) { - if name == "" { - return nil, ErrInvalidName(name) - } - var n Network - - s := func(current Network) bool { - if current.Name() == name { - n = current - return true - } - return false - } - - c.WalkNetworks(s) - - if n == nil { -
return nil, ErrNoSuchNetwork(name) - } - - return n, nil -} - -func (c *controller) NetworkByID(id string) (Network, error) { - if id == "" { - return nil, ErrInvalidID(id) - } - - n, err := c.getNetworkFromStore(id) - if err != nil { - return nil, ErrNoSuchNetwork(id) - } - - return n, nil -} - -// NewSandbox creates a new sandbox for the passed container id -func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error) { - var err error - - if containerID == "" { - return nil, types.BadRequestErrorf("invalid container ID") - } - - var sb *sandbox - c.Lock() - for _, s := range c.sandboxes { - if s.containerID == containerID { - // If not a stub, then we already have a complete sandbox. - if !s.isStub { - c.Unlock() - return nil, types.BadRequestErrorf("container %s is already present: %v", containerID, s) - } - - // We already have a stub sandbox from the - // store. Make use of it so that we don't lose - // the endpoints from store but reset the - // isStub flag. - sb = s - sb.isStub = false - break - } - } - c.Unlock() - - // Create sandbox and process options first. Key generation depends on an option - if sb == nil { - sb = &sandbox{ - id: stringid.GenerateRandomID(), - containerID: containerID, - endpoints: epHeap{}, - epPriority: map[string]int{}, - config: containerConfig{}, - controller: c, - } - } - - heap.Init(&sb.endpoints) - - sb.processOptions(options...) - - if err = sb.setupResolutionFiles(); err != nil { - return nil, err - } - - if sb.config.useDefaultSandBox { - c.sboxOnce.Do(func() { - c.defOsSbox, err = osl.NewSandbox(sb.Key(), false) - }) - - if err != nil { - c.sboxOnce = sync.Once{} - return nil, fmt.Errorf("failed to create default sandbox: %v", err) - } - - sb.osSbox = c.defOsSbox - } - - if sb.osSbox == nil && !sb.config.useExternalKey { - if sb.osSbox, err = osl.NewSandbox(sb.Key(), !sb.config.useDefaultSandBox); err != nil { - return nil, fmt.Errorf("failed to create new osl sandbox: %v", err) - } - } - - c.Lock() - c.sandboxes[sb.id] = sb - c.Unlock() - defer func() { - if err != nil { - c.Lock() - delete(c.sandboxes, sb.id) - c.Unlock() - } - }() - - err = sb.storeUpdate() - if err != nil { - return nil, fmt.Errorf("updating the store state of sandbox failed: %v", err) - } - - return sb, nil -} - -func (c *controller) Sandboxes() []Sandbox { - c.Lock() - defer c.Unlock() - - list := make([]Sandbox, 0, len(c.sandboxes)) - for _, s := range c.sandboxes { - // Hide stub sandboxes from libnetwork users - if s.isStub { - continue - } - - list = append(list, s) - } - - return list -} - -func (c *controller) WalkSandboxes(walker SandboxWalker) { - for _, sb := range c.Sandboxes() { - if walker(sb) { - return - } - } -} - -func (c *controller) SandboxByID(id string) (Sandbox, error) { - if id == "" { - return nil, ErrInvalidID(id) - } - c.Lock() - s, ok := c.sandboxes[id] - c.Unlock() - if !ok { - return nil, types.NotFoundErrorf("sandbox %s not found", id) - } - return s, nil -} - -// SandboxDestroy destroys a sandbox given a container ID -func (c *controller) SandboxDestroy(id string) error { - var sb *sandbox - c.Lock() - for _, s := range c.sandboxes { - if s.containerID == id { - sb = s - break - } - } - c.Unlock() - - // It is not an error if sandbox is not available - if sb == nil { - return nil - } - - return sb.Delete() -} - -// SandboxContainerWalker returns a Sandbox Walker function which looks for an existing Sandbox with the passed containerID -func SandboxContainerWalker(out *Sandbox, containerID string) SandboxWalker { 
- return func(sb Sandbox) bool { - if sb.ContainerID() == containerID { - *out = sb - return true - } - return false - } -} - -// SandboxKeyWalker returns a Sandbox Walker function which looks for an existing Sandbox with the passed key -func SandboxKeyWalker(out *Sandbox, key string) SandboxWalker { - return func(sb Sandbox) bool { - if sb.Key() == key { - *out = sb - return true - } - return false - } -} - -func (c *controller) loadDriver(networkType string) (*driverData, error) { - // Plugins pkg performs lazy loading of plugins that acts as remote drivers. - // As per the design, this Get call will result in remote driver discovery if there is a corresponding plugin available. - _, err := plugins.Get(networkType, driverapi.NetworkPluginEndpointType) - if err != nil { - if err == plugins.ErrNotFound { - return nil, types.NotFoundErrorf(err.Error()) - } - return nil, err - } - c.Lock() - defer c.Unlock() - dd, ok := c.drivers[networkType] - if !ok { - return nil, ErrInvalidNetworkDriver(networkType) - } - return dd, nil -} - -func (c *controller) loadIpamDriver(name string) (*ipamData, error) { - if _, err := plugins.Get(name, ipamapi.PluginEndpointType); err != nil { - if err == plugins.ErrNotFound { - return nil, types.NotFoundErrorf(err.Error()) - } - return nil, err - } - c.Lock() - id, ok := c.ipamDrivers[name] - c.Unlock() - if !ok { - return nil, types.BadRequestErrorf("invalid ipam driver: %q", name) - } - return id, nil -} - -func (c *controller) getIPAM(name string) (id *ipamData, err error) { - var ok bool - c.Lock() - id, ok = c.ipamDrivers[name] - c.Unlock() - if !ok { - id, err = c.loadIpamDriver(name) - } - return id, err -} - -func (c *controller) getIpamDriver(name string) (ipamapi.Ipam, error) { - id, err := c.getIPAM(name) - if err != nil { - return nil, err - } - return id.driver, nil -} - -func (c *controller) Stop() { - c.closeStores() - c.stopExternalKeyListener() - osl.GC() -} diff --git a/vendor/github.com/docker/libnetwork/default_gateway.go b/vendor/github.com/docker/libnetwork/default_gateway.go deleted file mode 100644 index bfd7b725..00000000 --- a/vendor/github.com/docker/libnetwork/default_gateway.go +++ /dev/null @@ -1,152 +0,0 @@ -package libnetwork - -import ( - "fmt" - - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/types" -) - -const ( - libnGWNetwork = "docker_gwbridge" - gwEPlen = 12 -) - -/* - libnetwork creates a bridge network "docker_gwbridge" for providing - a default gateway for the containers if none of the container's endpoints - have a GW set by the driver. ICC is set to false for the GW_bridge network. - - If a driver can't provide external connectivity it can choose not to set - the GW IP for the endpoint. - - endpoint on the GW_bridge network is managed dynamically by libnetwork.
- i.e.: - - it's created when an endpoint without a GW joins the container - - it's deleted when an endpoint with a GW joins the container -*/ - -func (sb *sandbox) setupDefaultGW(srcEp *endpoint) error { - var createOptions []EndpointOption - c := srcEp.getNetwork().getController() - - // check if the container already has a GW endpoint - if ep := sb.getEndpointInGWNetwork(); ep != nil { - return nil - } - - n, err := c.NetworkByName(libnGWNetwork) - if err != nil { - if _, ok := err.(types.NotFoundError); !ok { - return err - } - n, err = c.createGWNetwork() - if err != nil { - return err - } - } - - if opt, ok := srcEp.generic[netlabel.PortMap]; ok { - if pb, ok := opt.([]types.PortBinding); ok { - createOptions = append(createOptions, CreateOptionPortMapping(pb)) - } - } - - if opt, ok := srcEp.generic[netlabel.ExposedPorts]; ok { - if exp, ok := opt.([]types.TransportPort); ok { - createOptions = append(createOptions, CreateOptionExposedPorts(exp)) - } - } - - createOptions = append(createOptions, CreateOptionAnonymous()) - - eplen := gwEPlen - if len(sb.containerID) < gwEPlen { - eplen = len(sb.containerID) - } - - newEp, err := n.CreateEndpoint("gateway_"+sb.containerID[0:eplen], createOptions...) - if err != nil { - return fmt.Errorf("container %s: endpoint create on GW Network failed: %v", sb.containerID, err) - } - epLocal := newEp.(*endpoint) - - if err := epLocal.sbJoin(sb); err != nil { - return fmt.Errorf("container %s: endpoint join on GW Network failed: %v", sb.containerID, err) - } - return nil -} - -func (sb *sandbox) clearDefaultGW() error { - var ep *endpoint - - if ep = sb.getEndpointInGWNetwork(); ep == nil { - return nil - } - - if err := ep.sbLeave(sb); err != nil { - return fmt.Errorf("container %s: endpoint leaving GW Network failed: %v", sb.containerID, err) - } - if err := ep.Delete(false); err != nil { - return fmt.Errorf("container %s: deleting endpoint on GW Network failed: %v", sb.containerID, err) - } - return nil -} - -func (sb *sandbox) needDefaultGW() bool { - var needGW bool - - for _, ep := range sb.getConnectedEndpoints() { - if ep.endpointInGWNetwork() { - continue - } - if ep.getNetwork().Type() == "null" || ep.getNetwork().Type() == "host" { - continue - } - if ep.getNetwork().Internal() { - return false - } - if ep.joinInfo.disableGatewayService { - return false - } - // TODO v6 needs to be handled.
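- // A driver-supplied IPv4 gateway, or a default (0.0.0.0/0) static route below, already gives the endpoint external connectivity, so no gateway endpoint is needed.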
- if len(ep.Gateway()) > 0 { - return false - } - for _, r := range ep.StaticRoutes() { - if r.Destination.String() == "0.0.0.0/0" { - return false - } - } - needGW = true - } - return needGW -} - -func (sb *sandbox) getEndpointInGWNetwork() *endpoint { - for _, ep := range sb.getConnectedEndpoints() { - if ep.getNetwork().name == libnGWNetwork { - return ep - } - } - return nil -} - -func (ep *endpoint) endpointInGWNetwork() bool { - if ep.getNetwork().name == libnGWNetwork { - return true - } - return false -} - -func (sb *sandbox) getEPwithoutGateway() *endpoint { - for _, ep := range sb.getConnectedEndpoints() { - if ep.getNetwork().Type() == "null" || ep.getNetwork().Type() == "host" { - continue - } - if len(ep.Gateway()) == 0 { - return ep - } - } - return nil -} diff --git a/vendor/github.com/docker/libnetwork/default_gateway_freebsd.go b/vendor/github.com/docker/libnetwork/default_gateway_freebsd.go deleted file mode 100644 index a8268739..00000000 --- a/vendor/github.com/docker/libnetwork/default_gateway_freebsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package libnetwork - -import "github.com/docker/libnetwork/types" - -func (c *controller) createGWNetwork() (Network, error) { - return nil, types.NotImplementedErrorf("default gateway functionality is not implemented in freebsd") -} diff --git a/vendor/github.com/docker/libnetwork/default_gateway_linux.go b/vendor/github.com/docker/libnetwork/default_gateway_linux.go deleted file mode 100644 index c7f81277..00000000 --- a/vendor/github.com/docker/libnetwork/default_gateway_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -package libnetwork - -import ( - "fmt" - "strconv" - - "github.com/docker/libnetwork/drivers/bridge" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" -) - -func (c *controller) createGWNetwork() (Network, error) { - netOption := map[string]string{ - bridge.BridgeName: libnGWNetwork, - bridge.EnableICC: strconv.FormatBool(false), - bridge.EnableIPMasquerade: strconv.FormatBool(true), - } - - n, err := c.NewNetwork("bridge", libnGWNetwork, - NetworkOptionGeneric(options.Generic{ - netlabel.GenericData: netOption, - netlabel.EnableIPv6: false, - })) - - if err != nil { - return nil, fmt.Errorf("error creating external connectivity network: %v", err) - } - return n, err -} diff --git a/vendor/github.com/docker/libnetwork/default_gateway_windows.go b/vendor/github.com/docker/libnetwork/default_gateway_windows.go deleted file mode 100644 index 48eee2ba..00000000 --- a/vendor/github.com/docker/libnetwork/default_gateway_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package libnetwork - -import "github.com/docker/libnetwork/types" - -func (c *controller) createGWNetwork() (Network, error) { - return nil, types.NotImplementedErrorf("default gateway functionality is not implemented in windows") -} diff --git a/vendor/github.com/docker/libnetwork/drivers.go b/vendor/github.com/docker/libnetwork/drivers.go deleted file mode 100644 index 1b11f373..00000000 --- a/vendor/github.com/docker/libnetwork/drivers.go +++ /dev/null @@ -1,78 +0,0 @@ -package libnetwork - -import ( - "strings" - - "github.com/docker/libnetwork/driverapi" - "github.com/docker/libnetwork/ipamapi" - builtinIpam "github.com/docker/libnetwork/ipams/builtin" - remoteIpam "github.com/docker/libnetwork/ipams/remote" - "github.com/docker/libnetwork/netlabel" -) - -type initializer struct { - fn func(driverapi.DriverCallback, map[string]interface{}) error - ntype string -} - -func initDrivers(c *controller) error { - for _, i := range 
getInitializers() { - if err := i.fn(c, makeDriverConfig(c, i.ntype)); err != nil { - return err - } - } - - return nil -} - -func makeDriverConfig(c *controller, ntype string) map[string]interface{} { - if c.cfg == nil { - return nil - } - - config := make(map[string]interface{}) - - for _, label := range c.cfg.Daemon.Labels { - if !strings.HasPrefix(netlabel.Key(label), netlabel.DriverPrefix+"."+ntype) { - continue - } - - config[netlabel.Key(label)] = netlabel.Value(label) - } - - drvCfg, ok := c.cfg.Daemon.DriverCfg[ntype] - if ok { - for k, v := range drvCfg.(map[string]interface{}) { - config[k] = v - } - } - - // We don't send datastore configs to external plugins - if ntype == "remote" { - return config - } - - for k, v := range c.cfg.Scopes { - if !v.IsValid() { - continue - } - - config[netlabel.MakeKVProvider(k)] = v.Client.Provider - config[netlabel.MakeKVProviderURL(k)] = v.Client.Address - config[netlabel.MakeKVProviderConfig(k)] = v.Client.Config - } - - return config -} - -func initIpams(ic ipamapi.Callback, lDs, gDs interface{}) error { - for _, fn := range [](func(ipamapi.Callback, interface{}, interface{}) error){ - builtinIpam.Init, - remoteIpam.Init, - } { - if err := fn(ic, lDs, gDs); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/libnetwork/drivers_freebsd.go b/vendor/github.com/docker/libnetwork/drivers_freebsd.go deleted file mode 100644 index 416bba04..00000000 --- a/vendor/github.com/docker/libnetwork/drivers_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package libnetwork - -import ( - "github.com/docker/libnetwork/drivers/null" - "github.com/docker/libnetwork/drivers/remote" -) - -func getInitializers() []initializer { - return []initializer{ - {null.Init, "null"}, - {remote.Init, "remote"}, - } -} diff --git a/vendor/github.com/docker/libnetwork/drivers_linux.go b/vendor/github.com/docker/libnetwork/drivers_linux.go deleted file mode 100644 index 4d31b986..00000000 --- a/vendor/github.com/docker/libnetwork/drivers_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -package libnetwork - -import ( - "github.com/docker/libnetwork/drivers/bridge" - "github.com/docker/libnetwork/drivers/host" - "github.com/docker/libnetwork/drivers/null" - "github.com/docker/libnetwork/drivers/overlay" - "github.com/docker/libnetwork/drivers/remote" -) - -func getInitializers() []initializer { - return []initializer{ - {bridge.Init, "bridge"}, - {host.Init, "host"}, - {null.Init, "null"}, - {remote.Init, "remote"}, - {overlay.Init, "overlay"}, - } -} diff --git a/vendor/github.com/docker/libnetwork/drivers_windows.go b/vendor/github.com/docker/libnetwork/drivers_windows.go deleted file mode 100644 index 314ff8d5..00000000 --- a/vendor/github.com/docker/libnetwork/drivers_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package libnetwork - -import "github.com/docker/libnetwork/drivers/windows" - -func getInitializers() []initializer { - return []initializer{ - {windows.Init, "windows"}, - } -} diff --git a/vendor/github.com/docker/libnetwork/endpoint.go b/vendor/github.com/docker/libnetwork/endpoint.go deleted file mode 100644 index 88312e9c..00000000 --- a/vendor/github.com/docker/libnetwork/endpoint.go +++ /dev/null @@ -1,964 +0,0 @@ -package libnetwork - -import ( - "container/heap" - "encoding/json" - "fmt" - "net" - "strings" - "sync" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/ipamapi" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - 
"github.com/docker/libnetwork/types" -) - -// Endpoint represents a logical connection between a network and a sandbox. -type Endpoint interface { - // A system generated id for this endpoint. - ID() string - - // Name returns the name of this endpoint. - Name() string - - // Network returns the name of the network to which this endpoint is attached. - Network() string - - // Join joins the sandbox to the endpoint and populates into the sandbox - // the network resources allocated for the endpoint. - Join(sandbox Sandbox, options ...EndpointOption) error - - // Leave detaches the network resources populated in the sandbox. - Leave(sandbox Sandbox, options ...EndpointOption) error - - // Return certain operational data belonging to this endpoint - Info() EndpointInfo - - // DriverInfo returns a collection of driver operational data related to this endpoint retrieved from the driver - DriverInfo() (map[string]interface{}, error) - - // Delete and detaches this endpoint from the network. - Delete(force bool) error -} - -// EndpointOption is a option setter function type used to pass varios options to Network -// and Endpoint interfaces methods. The various setter functions of type EndpointOption are -// provided by libnetwork, they look like Option[...](...) -type EndpointOption func(ep *endpoint) - -type endpoint struct { - name string - id string - network *network - iface *endpointInterface - joinInfo *endpointJoinInfo - sandboxID string - locator string - exposedPorts []types.TransportPort - anonymous bool - disableResolution bool - generic map[string]interface{} - joinLeaveDone chan struct{} - prefAddress net.IP - prefAddressV6 net.IP - ipamOptions map[string]string - aliases map[string]string - myAliases []string - dbIndex uint64 - dbExists bool - sync.Mutex -} - -func (ep *endpoint) MarshalJSON() ([]byte, error) { - ep.Lock() - defer ep.Unlock() - - epMap := make(map[string]interface{}) - epMap["name"] = ep.name - epMap["id"] = ep.id - epMap["ep_iface"] = ep.iface - epMap["exposed_ports"] = ep.exposedPorts - if ep.generic != nil { - epMap["generic"] = ep.generic - } - epMap["sandbox"] = ep.sandboxID - epMap["locator"] = ep.locator - epMap["anonymous"] = ep.anonymous - epMap["disableResolution"] = ep.disableResolution - epMap["myAliases"] = ep.myAliases - return json.Marshal(epMap) -} - -func (ep *endpoint) UnmarshalJSON(b []byte) (err error) { - ep.Lock() - defer ep.Unlock() - - var epMap map[string]interface{} - if err := json.Unmarshal(b, &epMap); err != nil { - return err - } - ep.name = epMap["name"].(string) - ep.id = epMap["id"].(string) - - ib, _ := json.Marshal(epMap["ep_iface"]) - json.Unmarshal(ib, &ep.iface) - - tb, _ := json.Marshal(epMap["exposed_ports"]) - var tPorts []types.TransportPort - json.Unmarshal(tb, &tPorts) - ep.exposedPorts = tPorts - - cb, _ := json.Marshal(epMap["sandbox"]) - json.Unmarshal(cb, &ep.sandboxID) - - if v, ok := epMap["generic"]; ok { - ep.generic = v.(map[string]interface{}) - - if opt, ok := ep.generic[netlabel.PortMap]; ok { - pblist := []types.PortBinding{} - - for i := 0; i < len(opt.([]interface{})); i++ { - pb := types.PortBinding{} - tmp := opt.([]interface{})[i].(map[string]interface{}) - - bytes, err := json.Marshal(tmp) - if err != nil { - log.Error(err) - break - } - err = json.Unmarshal(bytes, &pb) - if err != nil { - log.Error(err) - break - } - pblist = append(pblist, pb) - } - ep.generic[netlabel.PortMap] = pblist - } - - if opt, ok := ep.generic[netlabel.ExposedPorts]; ok { - tplist := []types.TransportPort{} - - for i := 0; i < 
len(opt.([]interface{})); i++ { - tp := types.TransportPort{} - tmp := opt.([]interface{})[i].(map[string]interface{}) - - bytes, err := json.Marshal(tmp) - if err != nil { - log.Error(err) - break - } - err = json.Unmarshal(bytes, &tp) - if err != nil { - log.Error(err) - break - } - tplist = append(tplist, tp) - } - ep.generic[netlabel.ExposedPorts] = tplist - - } - } - - if v, ok := epMap["anonymous"]; ok { - ep.anonymous = v.(bool) - } - if v, ok := epMap["disableResolution"]; ok { - ep.disableResolution = v.(bool) - } - if l, ok := epMap["locator"]; ok { - ep.locator = l.(string) - } - ma, _ := json.Marshal(epMap["myAliases"]) - var myAliases []string - json.Unmarshal(ma, &myAliases) - ep.myAliases = myAliases - return nil -} - -func (ep *endpoint) New() datastore.KVObject { - return &endpoint{network: ep.getNetwork()} -} - -func (ep *endpoint) CopyTo(o datastore.KVObject) error { - ep.Lock() - defer ep.Unlock() - - dstEp := o.(*endpoint) - dstEp.name = ep.name - dstEp.id = ep.id - dstEp.sandboxID = ep.sandboxID - dstEp.locator = ep.locator - dstEp.dbIndex = ep.dbIndex - dstEp.dbExists = ep.dbExists - dstEp.anonymous = ep.anonymous - dstEp.disableResolution = ep.disableResolution - - if ep.iface != nil { - dstEp.iface = &endpointInterface{} - ep.iface.CopyTo(dstEp.iface) - } - - dstEp.exposedPorts = make([]types.TransportPort, len(ep.exposedPorts)) - copy(dstEp.exposedPorts, ep.exposedPorts) - - dstEp.myAliases = make([]string, len(ep.myAliases)) - copy(dstEp.myAliases, ep.myAliases) - - dstEp.generic = options.Generic{} - for k, v := range ep.generic { - dstEp.generic[k] = v - } - - return nil -} - -func (ep *endpoint) ID() string { - ep.Lock() - defer ep.Unlock() - - return ep.id -} - -func (ep *endpoint) Name() string { - ep.Lock() - defer ep.Unlock() - - return ep.name -} - -func (ep *endpoint) MyAliases() []string { - ep.Lock() - defer ep.Unlock() - - return ep.myAliases -} - -func (ep *endpoint) Network() string { - if ep.network == nil { - return "" - } - - return ep.network.name -} - -func (ep *endpoint) isAnonymous() bool { - ep.Lock() - defer ep.Unlock() - return ep.anonymous -} - -func (ep *endpoint) needResolver() bool { - ep.Lock() - defer ep.Unlock() - return !ep.disableResolution -} - -// endpoint Key structure : endpoint/network-id/endpoint-id -func (ep *endpoint) Key() []string { - if ep.network == nil { - return nil - } - - return []string{datastore.EndpointKeyPrefix, ep.network.id, ep.id} -} - -func (ep *endpoint) KeyPrefix() []string { - if ep.network == nil { - return nil - } - - return []string{datastore.EndpointKeyPrefix, ep.network.id} -} - -func (ep *endpoint) networkIDFromKey(key string) (string, error) { - // endpoint Key structure : docker/libnetwork/endpoint/${network-id}/${endpoint-id} - // it's an invalid key if the key doesn't have all the 5 key elements above - keyElements := strings.Split(key, "/") - if !strings.HasPrefix(key, datastore.Key(datastore.EndpointKeyPrefix)) || len(keyElements) < 5 { - return "", fmt.Errorf("invalid endpoint key : %v", key) - } - // network-id is placed at index=3. 
please refer to the endpoint.Key() method - return strings.Split(key, "/")[3], nil -} - -func (ep *endpoint) Value() []byte { - b, err := json.Marshal(ep) - if err != nil { - return nil - } - return b -} - -func (ep *endpoint) SetValue(value []byte) error { - return json.Unmarshal(value, ep) -} - -func (ep *endpoint) Index() uint64 { - ep.Lock() - defer ep.Unlock() - return ep.dbIndex -} - -func (ep *endpoint) SetIndex(index uint64) { - ep.Lock() - defer ep.Unlock() - ep.dbIndex = index - ep.dbExists = true -} - -func (ep *endpoint) Exists() bool { - ep.Lock() - defer ep.Unlock() - return ep.dbExists -} - -func (ep *endpoint) Skip() bool { - return ep.getNetwork().Skip() -} - -func (ep *endpoint) processOptions(options ...EndpointOption) { - ep.Lock() - defer ep.Unlock() - - for _, opt := range options { - if opt != nil { - opt(ep) - } - } -} - -func (ep *endpoint) getNetwork() *network { - ep.Lock() - defer ep.Unlock() - - return ep.network -} - -func (ep *endpoint) getNetworkFromStore() (*network, error) { - if ep.network == nil { - return nil, fmt.Errorf("invalid network object in endpoint %s", ep.Name()) - } - - return ep.network.ctrlr.getNetworkFromStore(ep.network.id) -} - -func (ep *endpoint) Join(sbox Sandbox, options ...EndpointOption) error { - if sbox == nil { - return types.BadRequestErrorf("endpoint cannot be joined by nil container") - } - - sb, ok := sbox.(*sandbox) - if !ok { - return types.BadRequestErrorf("not a valid Sandbox interface") - } - - sb.joinLeaveStart() - defer sb.joinLeaveEnd() - - return ep.sbJoin(sbox, options...) -} - -func (ep *endpoint) sbJoin(sbox Sandbox, options ...EndpointOption) error { - var err error - sb, ok := sbox.(*sandbox) - if !ok { - return types.BadRequestErrorf("not a valid Sandbox interface") - } - - network, err := ep.getNetworkFromStore() - if err != nil { - return fmt.Errorf("failed to get network from store during join: %v", err) - } - - ep, err = network.getEndpointFromStore(ep.ID()) - if err != nil { - return fmt.Errorf("failed to get endpoint from store during join: %v", err) - } - - ep.Lock() - if ep.sandboxID != "" { - ep.Unlock() - return types.ForbiddenErrorf("another container is attached to the same network endpoint") - } - ep.Unlock() - - ep.Lock() - ep.network = network - ep.sandboxID = sbox.ID() - ep.joinInfo = &endpointJoinInfo{} - epid := ep.id - ep.Unlock() - defer func() { - if err != nil { - ep.Lock() - ep.sandboxID = "" - ep.Unlock() - } - }() - - network.Lock() - nid := network.id - network.Unlock() - - ep.processOptions(options...)
- - driver, err := network.driver() - if err != nil { - return fmt.Errorf("failed to join endpoint: %v", err) - } - - err = driver.Join(nid, epid, sbox.Key(), ep, sbox.Labels()) - if err != nil { - return err - } - defer func() { - if err != nil { - // Do not alter global err variable, it's needed by the previous defer - if err := driver.Leave(nid, epid); err != nil { - log.Warnf("driver leave failed while rolling back join: %v", err) - } - } - }() - - // Watch for service records - network.getController().watchSvcRecord(ep) - - address := "" - if ip := ep.getFirstInterfaceAddress(); ip != nil { - address = ip.String() - } - if err = sb.updateHostsFile(address); err != nil { - return err - } - if err = sb.updateDNS(network.enableIPv6); err != nil { - return err - } - - if err = network.getController().updateToStore(ep); err != nil { - return err - } - - sb.Lock() - heap.Push(&sb.endpoints, ep) - sb.Unlock() - defer func() { - if err != nil { - for i, e := range sb.getConnectedEndpoints() { - if e == ep { - sb.Lock() - heap.Remove(&sb.endpoints, i) - sb.Unlock() - return - } - } - } - }() - - if err = sb.populateNetworkResources(ep); err != nil { - return err - } - - if sb.needDefaultGW() { - return sb.setupDefaultGW(ep) - } - return sb.clearDefaultGW() -} - -func (ep *endpoint) rename(name string) error { - var err error - n := ep.getNetwork() - if n == nil { - return fmt.Errorf("network not connected for ep %q", ep.name) - } - - n.getController().Lock() - netWatch, ok := n.getController().nmap[n.ID()] - n.getController().Unlock() - - if !ok { - return fmt.Errorf("watch null for network %q", n.Name()) - } - - n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), false) - - oldName := ep.name - ep.name = name - - n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), true) - defer func() { - if err != nil { - n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), false) - ep.name = oldName - n.updateSvcRecord(ep, n.getController().getLocalEps(netWatch), true) - } - }() - - // Update the store with the updated name - if err = n.getController().updateToStore(ep); err != nil { - return err - } - // After the name change, do a dummy endpoint count update to - // trigger the service record update in the peer nodes - - // Ignore the error, because an updateStore failure for EpCnt is a - // benign error. Besides, there is no meaningful recovery that - // we can do. When the cluster recovers, a subsequent EpCnt update - // will force the peers to get the correct EP name. - n.getEpCnt().updateStore() - - return err -} - -func (ep *endpoint) hasInterface(iName string) bool { - ep.Lock() - defer ep.Unlock() - - return ep.iface != nil && ep.iface.srcName == iName -} - -func (ep *endpoint) Leave(sbox Sandbox, options ...EndpointOption) error { - if sbox == nil || sbox.ID() == "" || sbox.Key() == "" { - return types.BadRequestErrorf("invalid Sandbox passed to endpoint leave: %v", sbox) - } - - sb, ok := sbox.(*sandbox) - if !ok { - return types.BadRequestErrorf("not a valid Sandbox interface") - } - - sb.joinLeaveStart() - defer sb.joinLeaveEnd() - - return ep.sbLeave(sbox, options...)
-} - -func (ep *endpoint) sbLeave(sbox Sandbox, options ...EndpointOption) error { - sb, ok := sbox.(*sandbox) - if !ok { - return types.BadRequestErrorf("not a valid Sandbox interface") - } - - n, err := ep.getNetworkFromStore() - if err != nil { - return fmt.Errorf("failed to get network from store during leave: %v", err) - } - - ep, err = n.getEndpointFromStore(ep.ID()) - if err != nil { - return fmt.Errorf("failed to get endpoint from store during leave: %v", err) - } - - ep.Lock() - sid := ep.sandboxID - ep.Unlock() - - if sid == "" { - return types.ForbiddenErrorf("cannot leave endpoint with no attached sandbox") - } - if sid != sbox.ID() { - return types.ForbiddenErrorf("unexpected sandbox ID in leave request. Expected %s. Got %s", ep.sandboxID, sbox.ID()) - } - - ep.processOptions(options...) - - d, err := n.driver() - if err != nil { - return fmt.Errorf("failed to leave endpoint: %v", err) - } - - ep.Lock() - ep.sandboxID = "" - ep.network = n - ep.Unlock() - - if err := d.Leave(n.id, ep.id); err != nil { - if _, ok := err.(types.MaskableError); !ok { - log.Warnf("driver error disconnecting container %s : %v", ep.name, err) - } - } - - if err := sb.clearNetworkResources(ep); err != nil { - log.Warnf("Could not cleanup network resources on container %s disconnect: %v", ep.name, err) - } - - // Update the store about the sandbox detach only after we - // have completed sb.clearNetworkresources above to avoid - // spurious logs when cleaning up the sandbox when the daemon - // ungracefully exits and restarts before completing sandbox - // detach but after store has been updated. - if err := n.getController().updateToStore(ep); err != nil { - return err - } - - sb.deleteHostsEntries(n.getSvcRecords(ep)) - - if !sb.inDelete && sb.needDefaultGW() { - ep := sb.getEPwithoutGateway() - if ep == nil { - return fmt.Errorf("endpoint without GW expected, but not found") - } - return sb.setupDefaultGW(ep) - } - return sb.clearDefaultGW() -} - -func (n *network) validateForceDelete(locator string) error { - if n.Scope() == datastore.LocalScope { - return nil - } - - if locator == "" { - return fmt.Errorf("invalid endpoint locator identifier") - } - - return nil -} - -func (ep *endpoint) Delete(force bool) error { - var err error - n, err := ep.getNetworkFromStore() - if err != nil { - return fmt.Errorf("failed to get network during Delete: %v", err) - } - - ep, err = n.getEndpointFromStore(ep.ID()) - if err != nil { - return fmt.Errorf("failed to get endpoint from store during Delete: %v", err) - } - - ep.Lock() - epid := ep.id - name := ep.name - sbid := ep.sandboxID - locator := ep.locator - ep.Unlock() - - if force { - if err = n.validateForceDelete(locator); err != nil { - return fmt.Errorf("unable to force delete endpoint %s: %v", name, err) - } - } - - sb, _ := n.getController().SandboxByID(sbid) - if sb != nil && !force { - return &ActiveContainerError{name: name, id: epid} - } - - if sb != nil { - if e := ep.sbLeave(sb); e != nil { - log.Warnf("failed to leave sandbox for endpoint %s : %v", name, e) - } - } - - if err = n.getController().deleteFromStore(ep); err != nil { - return err - } - - defer func() { - if err != nil && !force { - ep.dbExists = false - if e := n.getController().updateToStore(ep); e != nil { - log.Warnf("failed to recreate endpoint in store %s : %v", name, e) - } - } - }() - - if err = n.getEpCnt().DecEndpointCnt(); err != nil && !force { - return err - } - defer func() { - if err != nil && !force { - if e := n.getEpCnt().IncEndpointCnt(); e != nil { - 
log.Warnf("failed to update network %s : %v", n.name, e) - } - } - }() - - // unwatch for service records - n.getController().unWatchSvcRecord(ep) - - if err = ep.deleteEndpoint(); err != nil && !force { - return err - } - - ep.releaseAddress() - - return nil -} - -func (ep *endpoint) deleteEndpoint() error { - ep.Lock() - n := ep.network - name := ep.name - epid := ep.id - ep.Unlock() - - driver, err := n.driver() - if err != nil { - return fmt.Errorf("failed to delete endpoint: %v", err) - } - - if err := driver.DeleteEndpoint(n.id, epid); err != nil { - if _, ok := err.(types.ForbiddenError); ok { - return err - } - - if _, ok := err.(types.MaskableError); !ok { - log.Warnf("driver error deleting endpoint %s : %v", name, err) - } - } - - return nil -} - -func (ep *endpoint) getSandbox() (*sandbox, bool) { - c := ep.network.getController() - ep.Lock() - sid := ep.sandboxID - ep.Unlock() - - c.Lock() - ps, ok := c.sandboxes[sid] - c.Unlock() - - return ps, ok -} - -func (ep *endpoint) getFirstInterfaceAddress() net.IP { - ep.Lock() - defer ep.Unlock() - - if ep.iface.addr != nil { - return ep.iface.addr.IP - } - - return nil -} - -// EndpointOptionGeneric function returns an option setter for a Generic option defined -// in a Dictionary of Key-Value pair -func EndpointOptionGeneric(generic map[string]interface{}) EndpointOption { - return func(ep *endpoint) { - for k, v := range generic { - ep.generic[k] = v - } - } -} - -// CreateOptionIpam function returns an option setter for the ipam configuration for this endpoint -func CreateOptionIpam(ipV4, ipV6 net.IP, ipamOptions map[string]string) EndpointOption { - return func(ep *endpoint) { - ep.prefAddress = ipV4 - ep.prefAddressV6 = ipV6 - ep.ipamOptions = ipamOptions - } -} - -// CreateOptionExposedPorts function returns an option setter for the container exposed -// ports option to be passed to network.CreateEndpoint() method. -func CreateOptionExposedPorts(exposedPorts []types.TransportPort) EndpointOption { - return func(ep *endpoint) { - // Defensive copy - eps := make([]types.TransportPort, len(exposedPorts)) - copy(eps, exposedPorts) - // Store endpoint label and in generic because driver needs it - ep.exposedPorts = eps - ep.generic[netlabel.ExposedPorts] = eps - } -} - -// CreateOptionPortMapping function returns an option setter for the mapping -// ports option to be passed to network.CreateEndpoint() method. 
-func CreateOptionPortMapping(portBindings []types.PortBinding) EndpointOption { - return func(ep *endpoint) { - // Store a copy of the bindings as generic data to pass to the driver - pbs := make([]types.PortBinding, len(portBindings)) - copy(pbs, portBindings) - ep.generic[netlabel.PortMap] = pbs - } -} - -// CreateOptionAnonymous function returns an option setter for setting -// this endpoint as anonymous -func CreateOptionAnonymous() EndpointOption { - return func(ep *endpoint) { - ep.anonymous = true - } -} - -// CreateOptionDisableResolution function returns an option setter to indicate -// this endpoint doesn't want embedded DNS server functionality -func CreateOptionDisableResolution() EndpointOption { - return func(ep *endpoint) { - ep.disableResolution = true - } -} - -//CreateOptionAlias function returns an option setter for setting endpoint alias -func CreateOptionAlias(name string, alias string) EndpointOption { - return func(ep *endpoint) { - if ep.aliases == nil { - ep.aliases = make(map[string]string) - } - ep.aliases[alias] = name - } -} - -//CreateOptionMyAlias function returns an option setter for setting endpoint's self alias -func CreateOptionMyAlias(alias string) EndpointOption { - return func(ep *endpoint) { - ep.myAliases = append(ep.myAliases, alias) - } -} - -// JoinOptionPriority function returns an option setter for priority option to -// be passed to the endpoint.Join() method. -func JoinOptionPriority(ep Endpoint, prio int) EndpointOption { - return func(ep *endpoint) { - // ep lock already acquired - c := ep.network.getController() - c.Lock() - sb, ok := c.sandboxes[ep.sandboxID] - c.Unlock() - if !ok { - log.Errorf("Could not set endpoint priority value during Join to endpoint %s: No sandbox id present in endpoint", ep.id) - return - } - sb.epPriority[ep.id] = prio - } -} - -func (ep *endpoint) DataScope() string { - return ep.getNetwork().DataScope() -} - -func (ep *endpoint) assignAddress(ipam ipamapi.Ipam, assignIPv4, assignIPv6 bool) error { - var err error - - n := ep.getNetwork() - if n.Type() == "host" || n.Type() == "null" { - return nil - } - - log.Debugf("Assigning addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) - - if assignIPv4 { - if err = ep.assignAddressVersion(4, ipam); err != nil { - return err - } - } - - if assignIPv6 { - err = ep.assignAddressVersion(6, ipam) - } - - return err -} - -func (ep *endpoint) assignAddressVersion(ipVer int, ipam ipamapi.Ipam) error { - var ( - poolID *string - address **net.IPNet - prefAdd net.IP - progAdd net.IP - ) - - n := ep.getNetwork() - switch ipVer { - case 4: - poolID = &ep.iface.v4PoolID - address = &ep.iface.addr - prefAdd = ep.prefAddress - case 6: - poolID = &ep.iface.v6PoolID - address = &ep.iface.addrv6 - prefAdd = ep.prefAddressV6 - default: - return types.InternalErrorf("incorrect ip version number passed: %d", ipVer) - } - - ipInfo := n.getIPInfo(ipVer) - - // ipv6 address is not mandatory - if len(ipInfo) == 0 && ipVer == 6 { - return nil - } - - // The address to program may be chosen by the user or by the network driver in one specific - // case to support backward compatibility with `docker daemon --fixed-cidrv6` use case - if prefAdd != nil { - progAdd = prefAdd - } else if *address != nil { - progAdd = (*address).IP - } - - for _, d := range ipInfo { - if progAdd != nil && !d.Pool.Contains(progAdd) { - continue - } - addr, _, err := ipam.RequestAddress(d.PoolID, progAdd, ep.ipamOptions) - if err == nil { - ep.Lock() - *address = addr - *poolID = d.PoolID - 
ep.Unlock() - return nil - } - if err != ipamapi.ErrNoAvailableIPs || progAdd != nil { - return err - } - } - if progAdd != nil { - return types.BadRequestErrorf("Invalid preferred address %s: It does not belong to any of this network's subnets", prefAdd) - } - return fmt.Errorf("no available IPv%d addresses on this network's address pools: %s (%s)", ipVer, n.Name(), n.ID()) -} - -func (ep *endpoint) releaseAddress() { - n := ep.getNetwork() - if n.Type() == "host" || n.Type() == "null" { - return - } - - log.Debugf("Releasing addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) - - ipam, err := n.getController().getIpamDriver(n.ipamType) - if err != nil { - log.Warnf("Failed to retrieve ipam driver to release interface address on delete of endpoint %s (%s): %v", ep.Name(), ep.ID(), err) - return - } - if err := ipam.ReleaseAddress(ep.iface.v4PoolID, ep.iface.addr.IP); err != nil { - log.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addr.IP, ep.Name(), ep.ID(), err) - } - if ep.iface.addrv6 != nil && ep.iface.addrv6.IP.IsGlobalUnicast() { - if err := ipam.ReleaseAddress(ep.iface.v6PoolID, ep.iface.addrv6.IP); err != nil { - log.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addrv6.IP, ep.Name(), ep.ID(), err) - } - } -} - -func (c *controller) cleanupLocalEndpoints() { - nl, err := c.getNetworksForScope(datastore.LocalScope) - if err != nil { - log.Warnf("Could not get list of networks during endpoint cleanup: %v", err) - return - } - - for _, n := range nl { - epl, err := n.getEndpointsFromStore() - if err != nil { - log.Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, err) - continue - } - - for _, ep := range epl { - if err := ep.Delete(false); err != nil { - log.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err) - } - } - } -} diff --git a/vendor/github.com/docker/libnetwork/endpoint_cnt.go b/vendor/github.com/docker/libnetwork/endpoint_cnt.go deleted file mode 100644 index 507de393..00000000 --- a/vendor/github.com/docker/libnetwork/endpoint_cnt.go +++ /dev/null @@ -1,162 +0,0 @@ -package libnetwork - -import ( - "encoding/json" - "fmt" - "sync" - - "github.com/docker/libnetwork/datastore" -) - -type endpointCnt struct { - n *network - Count uint64 - dbIndex uint64 - dbExists bool - sync.Mutex -} - -const epCntKeyPrefix = "endpoint_count" - -func (ec *endpointCnt) Key() []string { - ec.Lock() - defer ec.Unlock() - - return []string{epCntKeyPrefix, ec.n.id} -} - -func (ec *endpointCnt) KeyPrefix() []string { - ec.Lock() - defer ec.Unlock() - - return []string{epCntKeyPrefix, ec.n.id} -} - -func (ec *endpointCnt) Value() []byte { - ec.Lock() - defer ec.Unlock() - - b, err := json.Marshal(ec) - if err != nil { - return nil - } - return b -} - -func (ec *endpointCnt) SetValue(value []byte) error { - ec.Lock() - defer ec.Unlock() - - return json.Unmarshal(value, &ec) -} - -func (ec *endpointCnt) Index() uint64 { - ec.Lock() - defer ec.Unlock() - return ec.dbIndex -} - -func (ec *endpointCnt) SetIndex(index uint64) { - ec.Lock() - ec.dbIndex = index - ec.dbExists = true - ec.Unlock() -} - -func (ec *endpointCnt) Exists() bool { - ec.Lock() - defer ec.Unlock() - return ec.dbExists -} - -func (ec *endpointCnt) Skip() bool { - ec.Lock() - defer ec.Unlock() - return !ec.n.persist -} - -func (ec *endpointCnt) New() datastore.KVObject { - ec.Lock() - defer ec.Unlock() - - return &endpointCnt{ - n: ec.n, - } -} - -func (ec
*endpointCnt) CopyTo(o datastore.KVObject) error { - ec.Lock() - defer ec.Unlock() - - dstEc := o.(*endpointCnt) - dstEc.n = ec.n - dstEc.Count = ec.Count - dstEc.dbExists = ec.dbExists - dstEc.dbIndex = ec.dbIndex - - return nil -} - -func (ec *endpointCnt) DataScope() string { - return ec.n.DataScope() -} - -func (ec *endpointCnt) EndpointCnt() uint64 { - ec.Lock() - defer ec.Unlock() - - return ec.Count -} - -func (ec *endpointCnt) updateStore() error { - store := ec.n.getController().getStore(ec.DataScope()) - if store == nil { - return fmt.Errorf("store not found for scope %s on endpoint count update", ec.DataScope()) - } - for { - if err := ec.n.getController().updateToStore(ec); err == nil || err != datastore.ErrKeyModified { - return err - } - if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil { - return fmt.Errorf("could not update the kvobject to latest on endpoint count update: %v", err) - } - } -} - -func (ec *endpointCnt) atomicIncDecEpCnt(inc bool) error { -retry: - ec.Lock() - if inc { - ec.Count++ - } else { - ec.Count-- - } - ec.Unlock() - - store := ec.n.getController().getStore(ec.DataScope()) - if store == nil { - return fmt.Errorf("store not found for scope %s", ec.DataScope()) - } - - if err := ec.n.getController().updateToStore(ec); err != nil { - if err == datastore.ErrKeyModified { - if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil { - return fmt.Errorf("could not update the kvobject to latest when trying to atomic add endpoint count: %v", err) - } - - goto retry - } - - return err - } - - return nil -} - -func (ec *endpointCnt) IncEndpointCnt() error { - return ec.atomicIncDecEpCnt(true) -} - -func (ec *endpointCnt) DecEndpointCnt() error { - return ec.atomicIncDecEpCnt(false) -} diff --git a/vendor/github.com/docker/libnetwork/endpoint_info.go b/vendor/github.com/docker/libnetwork/endpoint_info.go deleted file mode 100644 index 624bc533..00000000 --- a/vendor/github.com/docker/libnetwork/endpoint_info.go +++ /dev/null @@ -1,365 +0,0 @@ -package libnetwork - -import ( - "encoding/json" - "fmt" - "net" - - "github.com/docker/libnetwork/driverapi" - "github.com/docker/libnetwork/types" -) - -// EndpointInfo provides an interface to retrieve network resources bound to the endpoint. -type EndpointInfo interface { - // Iface returns InterfaceInfo, go interface that can be used - // to get more information on the interface which was assigned to - // the endpoint by the driver. This can be used after the - // endpoint has been created. - Iface() InterfaceInfo - - // Gateway returns the IPv4 gateway assigned by the driver. - // This will only return a valid value if a container has joined the endpoint. - Gateway() net.IP - - // GatewayIPv6 returns the IPv6 gateway assigned by the driver. - // This will only return a valid value if a container has joined the endpoint. - GatewayIPv6() net.IP - - // StaticRoutes returns the list of static routes configured by the network - // driver when the container joins a network - StaticRoutes() []*types.StaticRoute - - // Sandbox returns the attached sandbox if there, nil otherwise. - Sandbox() Sandbox -} - -// InterfaceInfo provides an interface to retrieve interface addresses bound to the endpoint. -type InterfaceInfo interface { - // MacAddress returns the MAC address assigned to the endpoint. - MacAddress() net.HardwareAddr - - // Address returns the IPv4 address assigned to the endpoint. - Address() *net.IPNet - - // AddressIPv6 returns the IPv6 address assigned to the endpoint. 
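- // A nil return value means no IPv6 address was assigned to the endpoint.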
- AddressIPv6() *net.IPNet -} - -type endpointInterface struct { - mac net.HardwareAddr - addr *net.IPNet - addrv6 *net.IPNet - srcName string - dstPrefix string - routes []*net.IPNet - v4PoolID string - v6PoolID string -} - -func (epi *endpointInterface) MarshalJSON() ([]byte, error) { - epMap := make(map[string]interface{}) - if epi.mac != nil { - epMap["mac"] = epi.mac.String() - } - if epi.addr != nil { - epMap["addr"] = epi.addr.String() - } - if epi.addrv6 != nil { - epMap["addrv6"] = epi.addrv6.String() - } - epMap["srcName"] = epi.srcName - epMap["dstPrefix"] = epi.dstPrefix - var routes []string - for _, route := range epi.routes { - routes = append(routes, route.String()) - } - epMap["routes"] = routes - epMap["v4PoolID"] = epi.v4PoolID - epMap["v6PoolID"] = epi.v6PoolID - return json.Marshal(epMap) -} - -func (epi *endpointInterface) UnmarshalJSON(b []byte) error { - var ( - err error - epMap map[string]interface{} - ) - if err = json.Unmarshal(b, &epMap); err != nil { - return err - } - if v, ok := epMap["mac"]; ok { - if epi.mac, err = net.ParseMAC(v.(string)); err != nil { - return types.InternalErrorf("failed to decode endpoint interface mac address after json unmarshal: %s", v.(string)) - } - } - if v, ok := epMap["addr"]; ok { - if epi.addr, err = types.ParseCIDR(v.(string)); err != nil { - return types.InternalErrorf("failed to decode endpoint interface ipv4 address after json unmarshal: %v", err) - } - } - if v, ok := epMap["addrv6"]; ok { - if epi.addrv6, err = types.ParseCIDR(v.(string)); err != nil { - return types.InternalErrorf("failed to decode endpoint interface ipv6 address after json unmarshal: %v", err) - } - } - - epi.srcName = epMap["srcName"].(string) - epi.dstPrefix = epMap["dstPrefix"].(string) - - rb, _ := json.Marshal(epMap["routes"]) - var routes []string - json.Unmarshal(rb, &routes) - epi.routes = make([]*net.IPNet, 0) - for _, route := range routes { - ip, ipr, err := net.ParseCIDR(route) - if err == nil { - ipr.IP = ip - epi.routes = append(epi.routes, ipr) - } - } - epi.v4PoolID = epMap["v4PoolID"].(string) - epi.v6PoolID = epMap["v6PoolID"].(string) - - return nil -} - -func (epi *endpointInterface) CopyTo(dstEpi *endpointInterface) error { - dstEpi.mac = types.GetMacCopy(epi.mac) - dstEpi.addr = types.GetIPNetCopy(epi.addr) - dstEpi.addrv6 = types.GetIPNetCopy(epi.addrv6) - dstEpi.srcName = epi.srcName - dstEpi.dstPrefix = epi.dstPrefix - dstEpi.v4PoolID = epi.v4PoolID - dstEpi.v6PoolID = epi.v6PoolID - - for _, route := range epi.routes { - dstEpi.routes = append(dstEpi.routes, types.GetIPNetCopy(route)) - } - - return nil -} - -type endpointJoinInfo struct { - gw net.IP - gw6 net.IP - StaticRoutes []*types.StaticRoute - disableGatewayService bool -} - -func (ep *endpoint) Info() EndpointInfo { - n, err := ep.getNetworkFromStore() - if err != nil { - return nil - } - - ep, err = n.getEndpointFromStore(ep.ID()) - if err != nil { - return nil - } - - sb, ok := ep.getSandbox() - if !ok { - // endpoint hasn't joined any sandbox. 
- // Just return the endpoint - return ep - } - - if epi := sb.getEndpoint(ep.ID()); epi != nil { - return epi - } - - return nil -} - -func (ep *endpoint) DriverInfo() (map[string]interface{}, error) { - ep, err := ep.retrieveFromStore() - if err != nil { - return nil, err - } - - if sb, ok := ep.getSandbox(); ok { - if gwep := sb.getEndpointInGWNetwork(); gwep != nil && gwep.ID() != ep.ID() { - return gwep.DriverInfo() - } - } - - n, err := ep.getNetworkFromStore() - if err != nil { - return nil, fmt.Errorf("could not find network in store for driver info: %v", err) - } - - driver, err := n.driver() - if err != nil { - return nil, fmt.Errorf("failed to get driver info: %v", err) - } - - return driver.EndpointOperInfo(n.ID(), ep.ID()) -} - -func (ep *endpoint) Iface() InterfaceInfo { - ep.Lock() - defer ep.Unlock() - - if ep.iface != nil { - return ep.iface - } - - return nil -} - -func (ep *endpoint) Interface() driverapi.InterfaceInfo { - ep.Lock() - defer ep.Unlock() - - if ep.iface != nil { - return ep.iface - } - - return nil -} - -func (epi *endpointInterface) SetMacAddress(mac net.HardwareAddr) error { - if epi.mac != nil { - return types.ForbiddenErrorf("endpoint interface MAC address present (%s). Cannot be modified with %s.", epi.mac, mac) - } - if mac == nil { - return types.BadRequestErrorf("tried to set nil MAC address to endpoint interface") - } - epi.mac = types.GetMacCopy(mac) - return nil -} - -func (epi *endpointInterface) SetIPAddress(address *net.IPNet) error { - if address.IP == nil { - return types.BadRequestErrorf("tried to set nil IP address to endpoint interface") - } - if address.IP.To4() == nil { - return setAddress(&epi.addrv6, address) - } - return setAddress(&epi.addr, address) -} - -func setAddress(ifaceAddr **net.IPNet, address *net.IPNet) error { - if *ifaceAddr != nil { - return types.ForbiddenErrorf("endpoint interface IP present (%s). Cannot be modified with (%s).", *ifaceAddr, address) - } - *ifaceAddr = types.GetIPNetCopy(address) - return nil -} - -func (epi *endpointInterface) MacAddress() net.HardwareAddr { - return types.GetMacCopy(epi.mac) -} - -func (epi *endpointInterface) Address() *net.IPNet { - return types.GetIPNetCopy(epi.addr) -} - -func (epi *endpointInterface) AddressIPv6() *net.IPNet { - return types.GetIPNetCopy(epi.addrv6) -} - -func (epi *endpointInterface) SetNames(srcName string, dstPrefix string) error { - epi.srcName = srcName - epi.dstPrefix = dstPrefix - return nil -} - -func (ep *endpoint) InterfaceName() driverapi.InterfaceNameInfo { - ep.Lock() - defer ep.Unlock() - - if ep.iface != nil { - return ep.iface - } - - return nil -} - -func (ep *endpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP) error { - ep.Lock() - defer ep.Unlock() - - r := types.StaticRoute{Destination: destination, RouteType: routeType, NextHop: nextHop} - - if routeType == types.NEXTHOP { - // If the route specifies a next-hop, then it's loosely routed (i.e. not bound to a particular interface). - ep.joinInfo.StaticRoutes = append(ep.joinInfo.StaticRoutes, &r) - } else { - // If the route doesn't specify a next-hop, it must be a connected route, bound to an interface. 
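- // Connected routes are therefore tracked on the interface itself rather than in the join info.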
- ep.iface.routes = append(ep.iface.routes, r.Destination) - } - return nil -} - -func (ep *endpoint) Sandbox() Sandbox { - cnt, ok := ep.getSandbox() - if !ok { - return nil - } - return cnt -} - -func (ep *endpoint) StaticRoutes() []*types.StaticRoute { - ep.Lock() - defer ep.Unlock() - - if ep.joinInfo == nil { - return nil - } - - return ep.joinInfo.StaticRoutes -} - -func (ep *endpoint) Gateway() net.IP { - ep.Lock() - defer ep.Unlock() - - if ep.joinInfo == nil { - return net.IP{} - } - - return types.GetIPCopy(ep.joinInfo.gw) -} - -func (ep *endpoint) GatewayIPv6() net.IP { - ep.Lock() - defer ep.Unlock() - - if ep.joinInfo == nil { - return net.IP{} - } - - return types.GetIPCopy(ep.joinInfo.gw6) -} - -func (ep *endpoint) SetGateway(gw net.IP) error { - ep.Lock() - defer ep.Unlock() - - ep.joinInfo.gw = types.GetIPCopy(gw) - return nil -} - -func (ep *endpoint) SetGatewayIPv6(gw6 net.IP) error { - ep.Lock() - defer ep.Unlock() - - ep.joinInfo.gw6 = types.GetIPCopy(gw6) - return nil -} - -func (ep *endpoint) retrieveFromStore() (*endpoint, error) { - n, err := ep.getNetworkFromStore() - if err != nil { - return nil, fmt.Errorf("could not find network in store to get latest endpoint %s: %v", ep.Name(), err) - } - return n.getEndpointFromStore(ep.ID()) -} - -func (ep *endpoint) DisableGatewayService() { - ep.Lock() - defer ep.Unlock() - - ep.joinInfo.disableGatewayService = true -} diff --git a/vendor/github.com/docker/libnetwork/error.go b/vendor/github.com/docker/libnetwork/error.go deleted file mode 100644 index 4158c995..00000000 --- a/vendor/github.com/docker/libnetwork/error.go +++ /dev/null @@ -1,175 +0,0 @@ -package libnetwork - -import ( - "fmt" -) - -// ErrNoSuchNetwork is returned when a network query finds no result -type ErrNoSuchNetwork string - -func (nsn ErrNoSuchNetwork) Error() string { - return fmt.Sprintf("network %s not found", string(nsn)) -} - -// NotFound denotes the type of this error -func (nsn ErrNoSuchNetwork) NotFound() {} - -// ErrNoSuchEndpoint is returned when a endpoint query finds no result -type ErrNoSuchEndpoint string - -func (nse ErrNoSuchEndpoint) Error() string { - return fmt.Sprintf("endpoint %s not found", string(nse)) -} - -// NotFound denotes the type of this error -func (nse ErrNoSuchEndpoint) NotFound() {} - -// ErrInvalidNetworkDriver is returned if an invalid driver -// name is passed. -type ErrInvalidNetworkDriver string - -func (ind ErrInvalidNetworkDriver) Error() string { - return fmt.Sprintf("invalid driver bound to network: %s", string(ind)) -} - -// BadRequest denotes the type of this error -func (ind ErrInvalidNetworkDriver) BadRequest() {} - -// ErrInvalidJoin is returned if a join is attempted on an endpoint -// which already has a container joined. -type ErrInvalidJoin struct{} - -func (ij ErrInvalidJoin) Error() string { - return "a container has already joined the endpoint" -} - -// BadRequest denotes the type of this error -func (ij ErrInvalidJoin) BadRequest() {} - -// ErrNoContainer is returned when the endpoint has no container -// attached to it. 
-type ErrNoContainer struct{} - -func (nc ErrNoContainer) Error() string { - return "no container is attached to the endpoint" -} - -// Maskable denotes the type of this error -func (nc ErrNoContainer) Maskable() {} - -// ErrInvalidID is returned when a query-by-id method is being invoked -// with an empty id parameter -type ErrInvalidID string - -func (ii ErrInvalidID) Error() string { - return fmt.Sprintf("invalid id: %s", string(ii)) -} - -// BadRequest denotes the type of this error -func (ii ErrInvalidID) BadRequest() {} - -// ErrInvalidName is returned when a query-by-name or resource create method is -// invoked with an empty name parameter -type ErrInvalidName string - -func (in ErrInvalidName) Error() string { - return fmt.Sprintf("invalid name: %s", string(in)) -} - -// BadRequest denotes the type of this error -func (in ErrInvalidName) BadRequest() {} - -// ErrInvalidConfigFile type is returned when an invalid LibNetwork config file is detected -type ErrInvalidConfigFile string - -func (cf ErrInvalidConfigFile) Error() string { - return fmt.Sprintf("Invalid Config file %q", string(cf)) -} - -// NetworkTypeError type is returned when the network type string is not -// known to libnetwork. -type NetworkTypeError string - -func (nt NetworkTypeError) Error() string { - return fmt.Sprintf("unknown driver %q", string(nt)) -} - -// NotFound denotes the type of this error -func (nt NetworkTypeError) NotFound() {} - -// NetworkNameError is returned when a network with the same name already exists. -type NetworkNameError string - -func (nnr NetworkNameError) Error() string { - return fmt.Sprintf("network with name %s already exists", string(nnr)) -} - -// Forbidden denotes the type of this error -func (nnr NetworkNameError) Forbidden() {} - -// UnknownNetworkError is returned when libnetwork could not find in it's database -// a network with the same name and id. -type UnknownNetworkError struct { - name string - id string -} - -func (une *UnknownNetworkError) Error() string { - return fmt.Sprintf("unknown network %s id %s", une.name, une.id) -} - -// NotFound denotes the type of this error -func (une *UnknownNetworkError) NotFound() {} - -// ActiveEndpointsError is returned when a network is deleted which has active -// endpoints in it. -type ActiveEndpointsError struct { - name string - id string -} - -func (aee *ActiveEndpointsError) Error() string { - return fmt.Sprintf("network %s has active endpoints", aee.name) -} - -// Forbidden denotes the type of this error -func (aee *ActiveEndpointsError) Forbidden() {} - -// UnknownEndpointError is returned when libnetwork could not find in it's database -// an endpoint with the same name and id. -type UnknownEndpointError struct { - name string - id string -} - -func (uee *UnknownEndpointError) Error() string { - return fmt.Sprintf("unknown endpoint %s id %s", uee.name, uee.id) -} - -// NotFound denotes the type of this error -func (uee *UnknownEndpointError) NotFound() {} - -// ActiveContainerError is returned when an endpoint is deleted which has active -// containers attached to it. 
-type ActiveContainerError struct { - name string - id string -} - -func (ace *ActiveContainerError) Error() string { - return fmt.Sprintf("endpoint with name %s id %s has active containers", ace.name, ace.id) -} - -// Forbidden denotes the type of this error -func (ace *ActiveContainerError) Forbidden() {} - -// InvalidContainerIDError is returned when an invalid container id is passed -// in Join/Leave -type InvalidContainerIDError string - -func (id InvalidContainerIDError) Error() string { - return fmt.Sprintf("invalid container id %s", string(id)) -} - -// BadRequest denotes the type of this error -func (id InvalidContainerIDError) BadRequest() {} diff --git a/vendor/github.com/docker/libnetwork/errors_test.go b/vendor/github.com/docker/libnetwork/errors_test.go deleted file mode 100644 index 29bf6686..00000000 --- a/vendor/github.com/docker/libnetwork/errors_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package libnetwork - -import ( - "testing" - - "github.com/docker/libnetwork/types" -) - -func TestErrorInterfaces(t *testing.T) { - - badRequestErrorList := []error{ErrInvalidID(""), ErrInvalidName(""), ErrInvalidJoin{}, ErrInvalidNetworkDriver(""), InvalidContainerIDError(""), ErrNoSuchNetwork(""), ErrNoSuchEndpoint("")} - for _, err := range badRequestErrorList { - switch u := err.(type) { - case types.BadRequestError: - return - default: - t.Fatalf("Failed to detect err %v is of type BadRequestError. Got type: %T", err, u) - } - } - - maskableErrorList := []error{ErrNoContainer{}} - for _, err := range maskableErrorList { - switch u := err.(type) { - case types.MaskableError: - return - default: - t.Fatalf("Failed to detect err %v is of type MaskableError. Got type: %T", err, u) - } - } - - notFoundErrorList := []error{NetworkTypeError(""), &UnknownNetworkError{}, &UnknownEndpointError{}} - for _, err := range notFoundErrorList { - switch u := err.(type) { - case types.NotFoundError: - return - default: - t.Fatalf("Failed to detect err %v is of type NotFoundError. Got type: %T", err, u) - } - } - - forbiddenErrorList := []error{NetworkTypeError(""), &UnknownNetworkError{}, &UnknownEndpointError{}} - for _, err := range forbiddenErrorList { - switch u := err.(type) { - case types.ForbiddenError: - return - default: - t.Fatalf("Failed to detect err %v is of type ForbiddenError. 
Got type: %T", err, u) - } - } - -} diff --git a/vendor/github.com/docker/libnetwork/libnetwork_internal_test.go b/vendor/github.com/docker/libnetwork/libnetwork_internal_test.go deleted file mode 100644 index 44ee68f9..00000000 --- a/vendor/github.com/docker/libnetwork/libnetwork_internal_test.go +++ /dev/null @@ -1,456 +0,0 @@ -package libnetwork - -import ( - "encoding/json" - "fmt" - "net" - "testing" - - "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/driverapi" - "github.com/docker/libnetwork/ipamapi" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/testutils" - "github.com/docker/libnetwork/types" -) - -func TestDriverRegistration(t *testing.T) { - bridgeNetType := "bridge" - c, err := New() - if err != nil { - t.Fatal(err) - } - defer c.Stop() - err = c.(*controller).RegisterDriver(bridgeNetType, nil, driverapi.Capability{}) - if err == nil { - t.Fatalf("Expecting the RegisterDriver to fail for %s", bridgeNetType) - } - if _, ok := err.(driverapi.ErrActiveRegistration); !ok { - t.Fatalf("Failed for unexpected reason: %v", err) - } - err = c.(*controller).RegisterDriver("test-dummy", nil, driverapi.Capability{}) - if err != nil { - t.Fatalf("Test failed with an error %v", err) - } -} - -func TestIpamDriverRegistration(t *testing.T) { - c, err := New() - if err != nil { - t.Fatal(err) - } - defer c.Stop() - - err = c.(*controller).RegisterIpamDriver("", nil) - if err == nil { - t.Fatalf("Expected failure, but suceeded") - } - if _, ok := err.(types.BadRequestError); !ok { - t.Fatalf("Failed for unexpected reason: %v", err) - } - - err = c.(*controller).RegisterIpamDriver(ipamapi.DefaultIPAM, nil) - if err == nil { - t.Fatalf("Expected failure, but suceeded") - } - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("Failed for unexpected reason: %v", err) - } -} - -func TestNetworkMarshalling(t *testing.T) { - n := &network{ - name: "Miao", - id: "abccba", - ipamType: "default", - addrSpace: "viola", - networkType: "bridge", - enableIPv6: true, - persist: true, - ipamOptions: map[string]string{ - netlabel.MacAddress: "a:b:c:d:e:f", - }, - ipamV4Config: []*IpamConf{ - &IpamConf{ - PreferredPool: "10.2.0.0/16", - SubPool: "10.2.0.0/24", - Gateway: "", - AuxAddresses: nil, - }, - &IpamConf{ - PreferredPool: "10.2.0.0/16", - SubPool: "10.2.1.0/24", - Gateway: "10.2.1.254", - }, - }, - ipamV6Config: []*IpamConf{ - &IpamConf{ - PreferredPool: "abcd::/64", - SubPool: "abcd:abcd:abcd:abcd:abcd::/80", - Gateway: "abcd::29/64", - AuxAddresses: nil, - }, - }, - ipamV4Info: []*IpamInfo{ - &IpamInfo{ - PoolID: "ipoolverde123", - Meta: map[string]string{ - netlabel.Gateway: "10.2.1.255/16", - }, - IPAMData: driverapi.IPAMData{ - AddressSpace: "viola", - Pool: &net.IPNet{ - IP: net.IP{10, 2, 0, 0}, - Mask: net.IPMask{255, 255, 255, 0}, - }, - Gateway: nil, - AuxAddresses: nil, - }, - }, - &IpamInfo{ - PoolID: "ipoolblue345", - Meta: map[string]string{ - netlabel.Gateway: "10.2.1.255/16", - }, - IPAMData: driverapi.IPAMData{ - AddressSpace: "viola", - Pool: &net.IPNet{ - IP: net.IP{10, 2, 1, 0}, - Mask: net.IPMask{255, 255, 255, 0}, - }, - Gateway: &net.IPNet{IP: net.IP{10, 2, 1, 254}, Mask: net.IPMask{255, 255, 255, 0}}, - AuxAddresses: map[string]*net.IPNet{ - "ip3": &net.IPNet{IP: net.IP{10, 2, 1, 3}, Mask: net.IPMask{255, 255, 255, 0}}, - "ip5": &net.IPNet{IP: net.IP{10, 2, 1, 55}, Mask: net.IPMask{255, 255, 255, 0}}, - }, - }, - }, - &IpamInfo{ - PoolID: "weirdinfo", - IPAMData: driverapi.IPAMData{ - Gateway: &net.IPNet{ - IP: net.IP{11, 2, 
1, 255}, - Mask: net.IPMask{255, 0, 0, 0}, - }, - }, - }, - }, - ipamV6Info: []*IpamInfo{ - &IpamInfo{ - PoolID: "ipoolv6", - IPAMData: driverapi.IPAMData{ - AddressSpace: "viola", - Pool: &net.IPNet{ - IP: net.IP{0xab, 0xcd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - Mask: net.IPMask{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0}, - }, - Gateway: &net.IPNet{ - IP: net.IP{0xab, 0xcd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29}, - Mask: net.IPMask{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0}, - }, - AuxAddresses: nil, - }, - }, - }, - } - - b, err := json.Marshal(n) - if err != nil { - t.Fatal(err) - } - - nn := &network{} - err = json.Unmarshal(b, nn) - if err != nil { - t.Fatal(err) - } - - if n.name != nn.name || n.id != nn.id || n.networkType != nn.networkType || n.ipamType != nn.ipamType || - n.addrSpace != nn.addrSpace || n.enableIPv6 != nn.enableIPv6 || - n.persist != nn.persist || !compareIpamConfList(n.ipamV4Config, nn.ipamV4Config) || - !compareIpamInfoList(n.ipamV4Info, nn.ipamV4Info) || !compareIpamConfList(n.ipamV6Config, nn.ipamV6Config) || - !compareIpamInfoList(n.ipamV6Info, nn.ipamV6Info) { - t.Fatalf("JSON marsh/unmarsh failed."+ - "\nOriginal:\n%#v\nDecoded:\n%#v"+ - "\nOriginal ipamV4Conf: %#v\n\nDecoded ipamV4Conf: %#v"+ - "\nOriginal ipamV4Info: %s\n\nDecoded ipamV4Info: %s"+ - "\nOriginal ipamV6Conf: %#v\n\nDecoded ipamV6Conf: %#v"+ - "\nOriginal ipamV6Info: %s\n\nDecoded ipamV6Info: %s", - n, nn, printIpamConf(n.ipamV4Config), printIpamConf(nn.ipamV4Config), - printIpamInfo(n.ipamV4Info), printIpamInfo(nn.ipamV4Info), - printIpamConf(n.ipamV6Config), printIpamConf(nn.ipamV6Config), - printIpamInfo(n.ipamV6Info), printIpamInfo(nn.ipamV6Info)) - } -} - -func printIpamConf(list []*IpamConf) string { - s := fmt.Sprintf("\n[]*IpamConfig{") - for _, i := range list { - s = fmt.Sprintf("%s %v,", s, i) - } - s = fmt.Sprintf("%s}", s) - return s -} - -func printIpamInfo(list []*IpamInfo) string { - s := fmt.Sprintf("\n[]*IpamInfo{") - for _, i := range list { - s = fmt.Sprintf("%s\n{\n%s\n}", s, i) - } - s = fmt.Sprintf("%s\n}", s) - return s -} - -func TestEndpointMarshalling(t *testing.T) { - ip, nw6, err := net.ParseCIDR("2001:3002:4003::122/64") - if err != nil { - t.Fatal(err) - } - nw6.IP = ip - - e := &endpoint{ - name: "Bau", - id: "efghijklmno", - sandboxID: "ambarabaciccicocco", - anonymous: true, - iface: &endpointInterface{ - mac: []byte{11, 12, 13, 14, 15, 16}, - addr: &net.IPNet{ - IP: net.IP{10, 0, 1, 23}, - Mask: net.IPMask{255, 255, 255, 0}, - }, - addrv6: nw6, - srcName: "veth12ab1314", - dstPrefix: "eth", - v4PoolID: "poolpool", - v6PoolID: "poolv6", - }, - } - - b, err := json.Marshal(e) - if err != nil { - t.Fatal(err) - } - - ee := &endpoint{} - err = json.Unmarshal(b, ee) - if err != nil { - t.Fatal(err) - } - - if e.name != ee.name || e.id != ee.id || e.sandboxID != ee.sandboxID || !compareEndpointInterface(e.iface, ee.iface) || e.anonymous != ee.anonymous { - t.Fatalf("JSON marsh/unmarsh failed.\nOriginal:\n%#v\nDecoded:\n%#v\nOriginal iface: %#v\nDecodediface:\n%#v", e, ee, e.iface, ee.iface) - } -} - -func compareEndpointInterface(a, b *endpointInterface) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return a.srcName == b.srcName && a.dstPrefix == b.dstPrefix && a.v4PoolID == b.v4PoolID && a.v6PoolID == b.v6PoolID && - types.CompareIPNet(a.addr, b.addr) && types.CompareIPNet(a.addrv6, b.addrv6) -} - -func compareIpamConfList(listA, listB []*IpamConf) bool { - var 
a, b *IpamConf - if len(listA) != len(listB) { - return false - } - for i := 0; i < len(listA); i++ { - a = listA[i] - b = listB[i] - if a.PreferredPool != b.PreferredPool || - a.SubPool != b.SubPool || - a.Gateway != b.Gateway || !compareStringMaps(a.AuxAddresses, b.AuxAddresses) { - return false - } - } - return true -} - -func compareIpamInfoList(listA, listB []*IpamInfo) bool { - var a, b *IpamInfo - if len(listA) != len(listB) { - return false - } - for i := 0; i < len(listA); i++ { - a = listA[i] - b = listB[i] - if a.PoolID != b.PoolID || !compareStringMaps(a.Meta, b.Meta) || - !types.CompareIPNet(a.Gateway, b.Gateway) || - a.AddressSpace != b.AddressSpace || - !types.CompareIPNet(a.Pool, b.Pool) || - !compareAddresses(a.AuxAddresses, b.AuxAddresses) { - return false - } - } - return true -} - -func compareStringMaps(a, b map[string]string) bool { - if len(a) != len(b) { - return false - } - if len(a) > 0 { - for k := range a { - if a[k] != b[k] { - return false - } - } - } - return true -} - -func compareAddresses(a, b map[string]*net.IPNet) bool { - if len(a) != len(b) { - return false - } - if len(a) > 0 { - for k := range a { - if !types.CompareIPNet(a[k], b[k]) { - return false - } - } - } - return true -} - -func TestAuxAddresses(t *testing.T) { - c, err := New() - if err != nil { - t.Fatal(err) - } - defer c.Stop() - - n := &network{ipamType: ipamapi.DefaultIPAM, networkType: "bridge", ctrlr: c.(*controller)} - - input := []struct { - masterPool string - subPool string - auxAddresses map[string]string - good bool - }{ - {"192.168.0.0/16", "", map[string]string{"goodOne": "192.168.2.2"}, true}, - {"192.168.0.0/16", "", map[string]string{"badOne": "192.169.2.3"}, false}, - {"192.168.0.0/16", "192.168.1.0/24", map[string]string{"goodOne": "192.168.1.2"}, true}, - {"192.168.0.0/16", "192.168.1.0/24", map[string]string{"stillGood": "192.168.2.4"}, true}, - {"192.168.0.0/16", "192.168.1.0/24", map[string]string{"badOne": "192.169.2.4"}, false}, - } - - for _, i := range input { - - n.ipamV4Config = []*IpamConf{&IpamConf{PreferredPool: i.masterPool, SubPool: i.subPool, AuxAddresses: i.auxAddresses}} - - err = n.ipamAllocate() - - if i.good != (err == nil) { - t.Fatalf("Unexpected result for %v: %v", i, err) - } - - n.ipamRelease() - } -} - -func TestIpamReleaseOnNetDriverFailures(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - cfgOptions, err := OptionBoltdbWithRandomDBFile() - c, err := New(cfgOptions...) 
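// Editor's note (not part of the vendored hunk): in the two preceding lines,
// the error returned by OptionBoltdbWithRandomDBFile() is overwritten by the
// `c, err := New(cfgOptions...)` assignment before it is ever checked, so only
// New()'s error reaches the t.Fatal below. A stricter version of this setup
// would check each error in turn.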
- if err != nil { - t.Fatal(err) - } - defer c.Stop() - - cc := c.(*controller) - bd := badDriver{failNetworkCreation: true} - cc.drivers[badDriverName] = &driverData{driver: &bd, capability: driverapi.Capability{DataScope: datastore.LocalScope}} - - // Test whether ipam state release is invoked on network create failure from net driver - // by checking whether subsequent network creation requesting same gateway IP succeeds - ipamOpt := NetworkOptionIpam(ipamapi.DefaultIPAM, "", []*IpamConf{&IpamConf{PreferredPool: "10.34.0.0/16", Gateway: "10.34.255.254"}}, nil, nil) - if _, err := c.NewNetwork(badDriverName, "badnet1", ipamOpt); err == nil { - t.Fatalf("bad network driver should have failed network creation") - } - - gnw, err := c.NewNetwork("bridge", "goodnet1", ipamOpt) - if err != nil { - t.Fatal(err) - } - gnw.Delete() - - // Now check whether ipam release works on endpoint creation failure - bd.failNetworkCreation = false - bnw, err := c.NewNetwork(badDriverName, "badnet2", ipamOpt) - if err != nil { - t.Fatal(err) - } - defer bnw.Delete() - - if _, err := bnw.CreateEndpoint("ep0"); err == nil { - t.Fatalf("bad network driver should have failed endpoint creation") - } - - // Now create good bridge network with different gateway - ipamOpt2 := NetworkOptionIpam(ipamapi.DefaultIPAM, "", []*IpamConf{&IpamConf{PreferredPool: "10.34.0.0/16", Gateway: "10.34.255.253"}}, nil, nil) - gnw, err = c.NewNetwork("bridge", "goodnet2", ipamOpt2) - if err != nil { - t.Fatal(err) - } - defer gnw.Delete() - - ep, err := gnw.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - defer ep.Delete(false) - - expectedIP, _ := types.ParseCIDR("10.34.0.1/16") - if !types.CompareIPNet(ep.Info().Iface().Address(), expectedIP) { - t.Fatalf("Ipam release must have failed, endpoint has unexpected address: %v", ep.Info().Iface().Address()) - } -} - -var badDriverName = "bad network driver" - -type badDriver struct { - failNetworkCreation bool -} - -func (b *badDriver) CreateNetwork(nid string, options map[string]interface{}, ipV4Data, ipV6Data []driverapi.IPAMData) error { - if b.failNetworkCreation { - return fmt.Errorf("I will not create any network") - } - return nil -} -func (b *badDriver) DeleteNetwork(nid string) error { - return nil -} -func (b *badDriver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, options map[string]interface{}) error { - return fmt.Errorf("I will not create any endpoint") -} -func (b *badDriver) DeleteEndpoint(nid, eid string) error { - return nil -} -func (b *badDriver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { - return nil, nil -} -func (b *badDriver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { - return fmt.Errorf("I will not allow any join") -} -func (b *badDriver) Leave(nid, eid string) error { - return nil -} -func (b *badDriver) DiscoverNew(dType driverapi.DiscoveryType, data interface{}) error { - return nil -} -func (b *badDriver) DiscoverDelete(dType driverapi.DiscoveryType, data interface{}) error { - return nil -} -func (b *badDriver) Type() string { - return badDriverName -} diff --git a/vendor/github.com/docker/libnetwork/libnetwork_test.go b/vendor/github.com/docker/libnetwork/libnetwork_test.go deleted file mode 100644 index 5f305e02..00000000 --- a/vendor/github.com/docker/libnetwork/libnetwork_test.go +++ /dev/null @@ -1,2354 +0,0 @@ -package libnetwork_test - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "net" - "net/http" - 
"net/http/httptest" - "os" - "os/exec" - "runtime" - "strconv" - "strings" - "sync" - "testing" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/driverapi" - "github.com/docker/libnetwork/ipamapi" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/osl" - "github.com/docker/libnetwork/testutils" - "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/vishvananda/netlink" - "github.com/vishvananda/netns" -) - -const ( - bridgeNetType = "bridge" -) - -var controller libnetwork.NetworkController - -func TestMain(m *testing.M) { - if reexec.Init() { - return - } - - if err := createController(); err != nil { - log.Errorf("Error creating controller: %v", err) - os.Exit(1) - } - - //libnetwork.SetTestDataStore(controller, datastore.NewCustomDataStore(datastore.NewMockStore())) - - x := m.Run() - controller.Stop() - os.Exit(x) -} - -func createController() error { - var err error - - // Cleanup local datastore file - os.Remove(datastore.DefaultScopes("")[datastore.LocalScope].Client.Address) - - option := options.Generic{ - "EnableIPForwarding": true, - } - - genericOption := make(map[string]interface{}) - genericOption[netlabel.GenericData] = option - - cfgOptions, err := libnetwork.OptionBoltdbWithRandomDBFile() - if err != nil { - return err - } - controller, err = libnetwork.New(append(cfgOptions, config.OptionDriverConfig(bridgeNetType, genericOption))...) - if err != nil { - return err - } - - return nil -} - -func createTestNetwork(networkType, networkName string, netOption options.Generic, ipamV4Configs, ipamV6Configs []*libnetwork.IpamConf) (libnetwork.Network, error) { - return controller.NewNetwork(networkType, networkName, - libnetwork.NetworkOptionGeneric(netOption), - libnetwork.NetworkOptionIpam(ipamapi.DefaultIPAM, "", ipamV4Configs, ipamV6Configs, nil)) -} - -func getEmptyGenericOption() map[string]interface{} { - genericOption := make(map[string]interface{}) - genericOption[netlabel.GenericData] = options.Generic{} - return genericOption -} - -func getPortMapping() []types.PortBinding { - return []types.PortBinding{ - {Proto: types.TCP, Port: uint16(230), HostPort: uint16(23000)}, - {Proto: types.UDP, Port: uint16(200), HostPort: uint16(22000)}, - {Proto: types.TCP, Port: uint16(120), HostPort: uint16(12000)}, - {Proto: types.TCP, Port: uint16(320), HostPort: uint16(32000), HostPortEnd: uint16(32999)}, - {Proto: types.UDP, Port: uint16(420), HostPort: uint16(42000), HostPortEnd: uint16(42001)}, - } -} - -func TestNull(t *testing.T) { - cnt, err := controller.NewSandbox("null_container", - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1")) - if err != nil { - t.Fatal(err) - } - - network, err := createTestNetwork("null", "testnull", options.Generic{}, nil, nil) - if err != nil { - t.Fatal(err) - } - - ep, err := network.CreateEndpoint("testep") - if err != nil { - t.Fatal(err) - } - - err = ep.Join(cnt) - if err != nil { - t.Fatal(err) - } - - err = ep.Leave(cnt) - if err != nil { - t.Fatal(err) - } - - if err := ep.Delete(false); err != nil { - t.Fatal(err) - } - - if err := cnt.Delete(); err != nil { - t.Fatal(err) - } - - // 
host type is special network. Cannot be removed. - err = network.Delete() - if err == nil { - t.Fatal(err) - } - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("Unexpected error type") - } -} - -func TestHost(t *testing.T) { - sbx1, err := controller.NewSandbox("host_c1", - libnetwork.OptionHostname("test1"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1"), - libnetwork.OptionUseDefaultSandbox()) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sbx1.Delete(); err != nil { - t.Fatal(err) - } - }() - - sbx2, err := controller.NewSandbox("host_c2", - libnetwork.OptionHostname("test2"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1"), - libnetwork.OptionUseDefaultSandbox()) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sbx2.Delete(); err != nil { - t.Fatal(err) - } - }() - - network, err := createTestNetwork("host", "testhost", options.Generic{}, nil, nil) - if err != nil { - t.Fatal(err) - } - - ep1, err := network.CreateEndpoint("testep1") - if err != nil { - t.Fatal(err) - } - - if err := ep1.Join(sbx1); err != nil { - t.Fatal(err) - } - - ep2, err := network.CreateEndpoint("testep2") - if err != nil { - t.Fatal(err) - } - - if err := ep2.Join(sbx2); err != nil { - t.Fatal(err) - } - - if err := ep1.Leave(sbx1); err != nil { - t.Fatal(err) - } - - if err := ep2.Leave(sbx2); err != nil { - t.Fatal(err) - } - - if err := ep1.Delete(false); err != nil { - t.Fatal(err) - } - - if err := ep2.Delete(false); err != nil { - t.Fatal(err) - } - - // Try to create another host endpoint and join/leave that. - cnt3, err := controller.NewSandbox("host_c3", - libnetwork.OptionHostname("test3"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1"), - libnetwork.OptionUseDefaultSandbox()) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := cnt3.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep3, err := network.CreateEndpoint("testep3") - if err != nil { - t.Fatal(err) - } - - if err := ep3.Join(sbx2); err != nil { - t.Fatal(err) - } - - if err := ep3.Leave(sbx2); err != nil { - t.Fatal(err) - } - - if err := ep3.Delete(false); err != nil { - t.Fatal(err) - } - - // host type is special network. Cannot be removed. 
- err = network.Delete() - if err == nil { - t.Fatal(err) - } - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("Unexpected error type") - } -} - -func TestBridge(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - "EnableIPv6": true, - "EnableICC": true, - "EnableIPMasquerade": true, - }, - } - ipamV4ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "192.168.100.0/24", Gateway: "192.168.100.1"}} - ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe90::/64", Gateway: "fe90::22"}} - - network, err := createTestNetwork(bridgeNetType, "testnetwork", netOption, ipamV4ConfList, ipamV6ConfList) - if err != nil { - t.Fatal(err) - } - - ep, err := network.CreateEndpoint("testep", libnetwork.CreateOptionPortMapping(getPortMapping())) - if err != nil { - t.Fatal(err) - } - - epInfo, err := ep.DriverInfo() - if err != nil { - t.Fatal(err) - } - pmd, ok := epInfo[netlabel.PortMap] - if !ok { - t.Fatalf("Could not find expected info in endpoint data") - } - pm, ok := pmd.([]types.PortBinding) - if !ok { - t.Fatalf("Unexpected format for port mapping in endpoint operational data") - } - if len(pm) != 5 { - t.Fatalf("Incomplete data for port mapping in endpoint operational data: %d", len(pm)) - } - - if err := ep.Delete(false); err != nil { - t.Fatal(err) - } - - if err := network.Delete(); err != nil { - t.Fatal(err) - } -} - -// Testing IPV6 from MAC address -func TestBridgeIpv6FromMac(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testipv6mac", - "EnableIPv6": true, - "EnableICC": true, - "EnableIPMasquerade": true, - }, - } - ipamV4ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "192.168.100.0/24", Gateway: "192.168.100.1"}} - ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe90::/64", Gateway: "fe90::22"}} - - network, err := controller.NewNetwork(bridgeNetType, "testipv6mac", - libnetwork.NetworkOptionGeneric(netOption), - libnetwork.NetworkOptionIpam(ipamapi.DefaultIPAM, "", ipamV4ConfList, ipamV6ConfList, nil), - libnetwork.NetworkOptionDeferIPv6Alloc(true)) - if err != nil { - t.Fatal(err) - } - - mac := net.HardwareAddr{0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff} - epOption := options.Generic{netlabel.MacAddress: mac} - - ep, err := network.CreateEndpoint("testep", libnetwork.EndpointOptionGeneric(epOption)) - if err != nil { - t.Fatal(err) - } - - iface := ep.Info().Iface() - if !bytes.Equal(iface.MacAddress(), mac) { - t.Fatalf("Unexpected mac address: %v", iface.MacAddress()) - } - - ip, expIP, _ := net.ParseCIDR("fe90::aabb:ccdd:eeff/64") - expIP.IP = ip - if !types.CompareIPNet(expIP, iface.AddressIPv6()) { - t.Fatalf("Expected %v. Got: %v", expIP, iface.AddressIPv6()) - } - - if err := ep.Delete(false); err != nil { - t.Fatal(err) - } - - if err := network.Delete(); err != nil { - t.Fatal(err) - } -} - -func TestUnknownDriver(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - _, err := createTestNetwork("unknowndriver", "testnetwork", options.Generic{}, nil, nil) - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if _, ok := err.(types.NotFoundError); !ok { - t.Fatalf("Did not fail with expected error. 
Actual error: %v", err) - } -} - -func TestNilRemoteDriver(t *testing.T) { - _, err := controller.NewNetwork("framerelay", "dummy", - libnetwork.NetworkOptionGeneric(getEmptyGenericOption())) - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if _, ok := err.(types.NotFoundError); !ok { - t.Fatalf("Did not fail with expected error. Actual error: %v", err) - } -} - -func TestNetworkName(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - } - - _, err := createTestNetwork(bridgeNetType, "", netOption, nil, nil) - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if _, ok := err.(libnetwork.ErrInvalidName); !ok { - t.Fatalf("Expected to fail with ErrInvalidName error. Got %v", err) - } - - networkName := "testnetwork" - n, err := createTestNetwork(bridgeNetType, networkName, netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - if n.Name() != networkName { - t.Fatalf("Expected network name %s, got %s", networkName, n.Name()) - } -} - -func TestNetworkType(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - } - - n, err := createTestNetwork(bridgeNetType, "testnetwork", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - if n.Type() != bridgeNetType { - t.Fatalf("Expected network type %s, got %s", bridgeNetType, n.Type()) - } -} - -func TestNetworkID(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - } - - n, err := createTestNetwork(bridgeNetType, "testnetwork", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - if n.ID() == "" { - t.Fatal("Expected non-empty network id") - } -} - -func TestDeleteNetworkWithActiveEndpoints(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - "BridgeName": "testnetwork", - } - option := options.Generic{ - netlabel.GenericData: netOption, - } - - network, err := createTestNetwork(bridgeNetType, "testnetwork", option, nil, nil) - if err != nil { - t.Fatal(err) - } - - ep, err := network.CreateEndpoint("testep") - if err != nil { - t.Fatal(err) - } - - err = network.Delete() - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if _, ok := err.(*libnetwork.ActiveEndpointsError); !ok { - t.Fatalf("Did not fail with expected error. Actual error: %v", err) - } - - // Done testing. Now cleanup. 
- if err := ep.Delete(false); err != nil { - t.Fatal(err) - } - - if err := network.Delete(); err != nil { - t.Fatal(err) - } -} - -func TestUnknownNetwork(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - "BridgeName": "testnetwork", - } - option := options.Generic{ - netlabel.GenericData: netOption, - } - - network, err := createTestNetwork(bridgeNetType, "testnetwork", option, nil, nil) - if err != nil { - t.Fatal(err) - } - - err = network.Delete() - if err != nil { - t.Fatal(err) - } - - err = network.Delete() - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if _, ok := err.(*libnetwork.UnknownNetworkError); !ok { - t.Fatalf("Did not fail with expected error. Actual error: %v", err) - } -} - -func TestUnknownEndpoint(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - "BridgeName": "testnetwork", - } - option := options.Generic{ - netlabel.GenericData: netOption, - } - ipamV4ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "192.168.100.0/24"}} - - network, err := createTestNetwork(bridgeNetType, "testnetwork", option, ipamV4ConfList, nil) - if err != nil { - t.Fatal(err) - } - - _, err = network.CreateEndpoint("") - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - if _, ok := err.(libnetwork.ErrInvalidName); !ok { - t.Fatalf("Expected to fail with ErrInvalidName error. Actual error: %v", err) - } - - ep, err := network.CreateEndpoint("testep") - if err != nil { - t.Fatal(err) - } - - err = ep.Delete(false) - if err != nil { - t.Fatal(err) - } - - // Done testing. Now cleanup - if err := network.Delete(); err != nil { - t.Fatal(err) - } -} - -func TestNetworkEndpointsWalkers(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - // Create network 1 and add 2 endpoint: ep11, ep12 - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "network1", - }, - } - - net1, err := createTestNetwork(bridgeNetType, "network1", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := net1.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep11, err := net1.CreateEndpoint("ep11") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep11.Delete(false); err != nil { - t.Fatal(err) - } - }() - - ep12, err := net1.CreateEndpoint("ep12") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep12.Delete(false); err != nil { - t.Fatal(err) - } - }() - - // Test list methods on net1 - epList1 := net1.Endpoints() - if len(epList1) != 2 { - t.Fatalf("Endpoints() returned wrong number of elements: %d instead of 2", len(epList1)) - } - // endpoint order is not guaranteed - for _, e := range epList1 { - if e != ep11 && e != ep12 { - t.Fatal("Endpoints() did not return all the expected elements") - } - } - - // Test Endpoint Walk method - var epName string - var epWanted libnetwork.Endpoint - wlk := func(ep libnetwork.Endpoint) bool { - if ep.Name() == epName { - epWanted = ep - return true - } - return false - } - - // Look for ep1 on network1 - epName = "ep11" - net1.WalkEndpoints(wlk) - if epWanted == nil { - t.Fatal(err) - } - if ep11 != epWanted { - t.Fatal(err) - } - - current := len(controller.Networks()) - - // Create network 2 - netOption = options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": 
"network2", - }, - } - - net2, err := createTestNetwork(bridgeNetType, "network2", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := net2.Delete(); err != nil { - t.Fatal(err) - } - }() - - // Test Networks method - if len(controller.Networks()) != current+1 { - t.Fatalf("Did not find the expected number of networks") - } - - // Test Network Walk method - var netName string - var netWanted libnetwork.Network - nwWlk := func(nw libnetwork.Network) bool { - if nw.Name() == netName { - netWanted = nw - return true - } - return false - } - - // Look for network named "network1" and "network2" - netName = "network1" - controller.WalkNetworks(nwWlk) - if netWanted == nil { - t.Fatal(err) - } - if net1.ID() != netWanted.ID() { - t.Fatal(err) - } - - netName = "network2" - controller.WalkNetworks(nwWlk) - if netWanted == nil { - t.Fatal(err) - } - if net2.ID() != netWanted.ID() { - t.Fatal(err) - } -} - -func TestDuplicateEndpoint(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - } - n, err := createTestNetwork(bridgeNetType, "testnetwork", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep.Delete(false); err != nil { - t.Fatal(err) - } - }() - - ep2, err := n.CreateEndpoint("ep1") - defer func() { - // Cleanup ep2 as well, else network cleanup might fail for failure cases - if ep2 != nil { - if err := ep2.Delete(false); err != nil { - t.Fatal(err) - } - } - }() - - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("Did not fail with expected error. Actual error: %v", err) - } -} - -func TestControllerQuery(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - // Create network 1 - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "network1", - }, - } - net1, err := createTestNetwork(bridgeNetType, "network1", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := net1.Delete(); err != nil { - t.Fatal(err) - } - }() - - // Create network 2 - netOption = options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "network2", - }, - } - net2, err := createTestNetwork(bridgeNetType, "network2", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := net2.Delete(); err != nil { - t.Fatal(err) - } - }() - - _, err = controller.NetworkByName("") - if err == nil { - t.Fatalf("NetworkByName() succeeded with invalid target name") - } - if _, ok := err.(libnetwork.ErrInvalidName); !ok { - t.Fatalf("Expected NetworkByName() to fail with ErrInvalidName error. 
Got: %v", err) - } - - _, err = controller.NetworkByID("") - if err == nil { - t.Fatalf("NetworkByID() succeeded with invalid target id") - } - if _, ok := err.(libnetwork.ErrInvalidID); !ok { - t.Fatalf("NetworkByID() failed with unexpected error: %v", err) - } - - g, err := controller.NetworkByID("network1") - if err == nil { - t.Fatalf("Unexpected success for NetworkByID(): %v", g) - } - if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { - t.Fatalf("NetworkByID() failed with unexpected error: %v", err) - } - - g, err = controller.NetworkByName("network1") - if err != nil { - t.Fatalf("Unexpected failure for NetworkByName(): %v", err) - } - if g == nil { - t.Fatalf("NetworkByName() did not find the network") - } - - if g != net1 { - t.Fatalf("NetworkByName() returned the wrong network") - } - - g, err = controller.NetworkByID(net1.ID()) - if err != nil { - t.Fatalf("Unexpected failure for NetworkByID(): %v", err) - } - if net1.ID() != g.ID() { - t.Fatalf("NetworkByID() returned unexpected element: %v", g) - } - - g, err = controller.NetworkByName("network2") - if err != nil { - t.Fatalf("Unexpected failure for NetworkByName(): %v", err) - } - if g == nil { - t.Fatalf("NetworkByName() did not find the network") - } - - if g != net2 { - t.Fatalf("NetworkByName() returned the wrong network") - } - - g, err = controller.NetworkByID(net2.ID()) - if err != nil { - t.Fatalf("Unexpected failure for NetworkByID(): %v", err) - } - if net2.ID() != g.ID() { - t.Fatalf("NetworkByID() returned unexpected element: %v", g) - } -} - -func TestNetworkQuery(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - // Create network 1 and add 2 endpoint: ep11, ep12 - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "network1", - }, - } - net1, err := createTestNetwork(bridgeNetType, "network1", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := net1.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep11, err := net1.CreateEndpoint("ep11") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep11.Delete(false); err != nil { - t.Fatal(err) - } - }() - - ep12, err := net1.CreateEndpoint("ep12") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep12.Delete(false); err != nil { - t.Fatal(err) - } - }() - - e, err := net1.EndpointByName("ep11") - if err != nil { - t.Fatal(err) - } - if ep11 != e { - t.Fatalf("EndpointByName() returned %v instead of %v", e, ep11) - } - - e, err = net1.EndpointByName("") - if err == nil { - t.Fatalf("EndpointByName() succeeded with invalid target name") - } - if _, ok := err.(libnetwork.ErrInvalidName); !ok { - t.Fatalf("Expected EndpointByName() to fail with ErrInvalidName error. 
Got: %v", err) - } - - e, err = net1.EndpointByName("IamNotAnEndpoint") - if err == nil { - t.Fatalf("EndpointByName() succeeded with unknown target name") - } - if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok { - t.Fatal(err) - } - if e != nil { - t.Fatalf("EndpointByName(): expected nil, got %v", e) - } - - e, err = net1.EndpointByID(ep12.ID()) - if err != nil { - t.Fatal(err) - } - if ep12.ID() != e.ID() { - t.Fatalf("EndpointByID() returned %v instead of %v", e, ep12) - } - - e, err = net1.EndpointByID("") - if err == nil { - t.Fatalf("EndpointByID() succeeded with invalid target id") - } - if _, ok := err.(libnetwork.ErrInvalidID); !ok { - t.Fatalf("EndpointByID() failed with unexpected error: %v", err) - } -} - -const containerID = "valid_c" - -func checkSandbox(t *testing.T, info libnetwork.EndpointInfo) { - origns, err := netns.Get() - if err != nil { - t.Fatalf("Could not get the current netns: %v", err) - } - defer origns.Close() - - key := info.Sandbox().Key() - f, err := os.OpenFile(key, os.O_RDONLY, 0) - if err != nil { - t.Fatalf("Failed to open network namespace path %q: %v", key, err) - } - defer f.Close() - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - nsFD := f.Fd() - if err = netns.Set(netns.NsHandle(nsFD)); err != nil { - t.Fatalf("Setting to the namespace pointed to by the sandbox %s failed: %v", key, err) - } - defer netns.Set(origns) - - _, err = netlink.LinkByName("eth0") - if err != nil { - t.Fatalf("Could not find the interface eth0 inside the sandbox: %v", err) - } - - _, err = netlink.LinkByName("eth1") - if err != nil { - t.Fatalf("Could not find the interface eth1 inside the sandbox: %v", err) - } -} - -func TestEndpointJoin(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - // Create network 1 and add 2 endpoint: ep11, ep12 - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork1", - "EnableIPv6": true, - "EnableICC": true, - "EnableIPMasquerade": true, - }, - } - ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe90::/64", Gateway: "fe90::22"}} - n1, err := controller.NewNetwork(bridgeNetType, "testnetwork1", - libnetwork.NetworkOptionGeneric(netOption), - libnetwork.NetworkOptionIpam(ipamapi.DefaultIPAM, "", nil, ipamV6ConfList, nil), - libnetwork.NetworkOptionDeferIPv6Alloc(true)) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n1.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep1, err := n1.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep1.Delete(false); err != nil { - t.Fatal(err) - } - }() - - // Validate if ep.Info() only gives me IP address info and not names and gateway during CreateEndpoint() - info := ep1.Info() - iface := info.Iface() - if iface.Address() != nil && iface.Address().IP.To4() == nil { - t.Fatalf("Invalid IP address returned: %v", iface.Address()) - } - if iface.AddressIPv6() != nil && iface.AddressIPv6().IP == nil { - t.Fatalf("Invalid IPv6 address returned: %v", iface.Address()) - } - - if len(info.Gateway()) != 0 { - t.Fatalf("Expected empty gateway for an empty endpoint. Instead found a gateway: %v", info.Gateway()) - } - if len(info.GatewayIPv6()) != 0 { - t.Fatalf("Expected empty gateway for an empty ipv6 endpoint. Instead found a gateway: %v", info.GatewayIPv6()) - } - - if info.Sandbox() != nil { - t.Fatalf("Expected an empty sandbox key for an empty endpoint. 
Instead found a non-empty sandbox key: %s", info.Sandbox().Key()) - } - - // test invalid joins - err = ep1.Join(nil) - if err == nil { - t.Fatalf("Expected to fail join with nil Sandbox") - } - if _, ok := err.(types.BadRequestError); !ok { - t.Fatalf("Unexpected error type returned: %T", err) - } - - fsbx := &fakeSandbox{} - if err = ep1.Join(fsbx); err == nil { - t.Fatalf("Expected to fail join with invalid Sandbox") - } - if _, ok := err.(types.BadRequestError); !ok { - t.Fatalf("Unexpected error type returned: %T", err) - } - - sb, err := controller.NewSandbox(containerID, - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1")) - if err != nil { - t.Fatal(err) - } - - defer func() { - if err := sb.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep1.Join(sb) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep1.Leave(sb) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - }() - - // Validate if ep.Info() only gives valid gateway and sandbox key after has container has joined. - info = ep1.Info() - if len(info.Gateway()) == 0 { - t.Fatalf("Expected a valid gateway for a joined endpoint. Instead found an invalid gateway: %v", info.Gateway()) - } - if len(info.GatewayIPv6()) == 0 { - t.Fatalf("Expected a valid ipv6 gateway for a joined endpoint. Instead found an invalid gateway: %v", info.GatewayIPv6()) - } - - if info.Sandbox() == nil { - t.Fatalf("Expected an non-empty sandbox key for a joined endpoint. Instead found a empty sandbox key") - } - - // Check endpoint provided container information - if ep1.Info().Sandbox().Key() != sb.Key() { - t.Fatalf("Endpoint Info returned unexpected sandbox key: %s", sb.Key()) - } - - // Attempt retrieval of endpoint interfaces statistics - stats, err := sb.Statistics() - if err != nil { - t.Fatal(err) - } - if _, ok := stats["eth0"]; !ok { - t.Fatalf("Did not find eth0 statistics") - } - - // Now test the container joining another network - n2, err := createTestNetwork(bridgeNetType, "testnetwork2", - options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork2", - }, - }, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n2.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep2, err := n2.CreateEndpoint("ep2") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep2.Delete(false); err != nil { - t.Fatal(err) - } - }() - - err = ep2.Join(sb) - if err != nil { - t.Fatal(err) - } - runtime.LockOSThread() - defer func() { - err = ep2.Leave(sb) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - }() - - if ep1.Info().Sandbox().Key() != ep2.Info().Sandbox().Key() { - t.Fatalf("ep1 and ep2 returned different container sandbox key") - } - - checkSandbox(t, info) -} - -type fakeSandbox struct{} - -func (f *fakeSandbox) ID() string { - return "fake sandbox" -} - -func (f *fakeSandbox) ContainerID() string { - return "" -} - -func (f *fakeSandbox) Key() string { - return "fake key" -} - -func (f *fakeSandbox) Labels() map[string]interface{} { - return nil -} - -func (f *fakeSandbox) Statistics() (map[string]*types.InterfaceStatistics, error) { - return nil, nil -} - -func (f *fakeSandbox) Refresh(opts ...libnetwork.SandboxOption) error { - return nil -} - -func (f *fakeSandbox) Delete() error { - return nil -} - -func (f *fakeSandbox) Rename(name string) error { - return nil -} - -func (f *fakeSandbox) SetKey(key string) error { - return nil -} - 
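Editor's aside, not part of the vendored diff: fakeSandbox above is a deliberately inert test double. It satisfies the libnetwork.Sandbox interface just well enough for the join/leave tests to prove that endpoints reject Sandbox values the controller never created, and that the rejection surfaces through the types.BadRequestError marker interface rather than through message text. A minimal self-contained sketch of the same two patterns follows; all names in it (Item, realItem, fakeItem, errBadItem, accept) are hypothetical illustrations, not taken from this code:

package main

import "fmt"

// badRequestError mirrors the marker-interface style used by libnetwork/types:
// error categories are denoted by an empty method, not a shared base type.
type badRequestError interface {
	BadRequest()
}

type errBadItem string

func (e errBadItem) Error() string { return fmt.Sprintf("unknown item %q", string(e)) }

// BadRequest denotes the type of this error, as in the deleted error.go.
func (e errBadItem) BadRequest() {}

type Item interface{ ID() string }

type realItem struct{ id string }

func (r *realItem) ID() string { return r.id }

// fakeItem is the test double: same interface, different concrete type.
type fakeItem struct{}

func (f *fakeItem) ID() string { return "fake" }

// accept admits only the concrete type the "controller" knows it created,
// the way ep.Join rejects a Sandbox it did not hand out.
func accept(i Item) error {
	if _, ok := i.(*realItem); !ok {
		return errBadItem(i.ID())
	}
	return nil
}

func main() {
	err := accept(&fakeItem{})
	// Callers classify the failure with a type assertion on the marker
	// interface, never by matching the error string.
	if _, ok := err.(badRequestError); ok {
		fmt.Println("rejected as bad request:", err)
	}
}

The marker-interface style keeps the error types decoupled from one another while still letting test code like TestErrorInterfaces verify each category with a plain type switch.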
-func (f *fakeSandbox) ResolveName(name string) net.IP { - return nil -} - -func (f *fakeSandbox) ResolveIP(ip string) string { - return "" -} - -func TestExternalKey(t *testing.T) { - externalKeyTest(t, false) -} - -func TestExternalKeyWithReexec(t *testing.T) { - externalKeyTest(t, true) -} - -func externalKeyTest(t *testing.T, reexec bool) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - }, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep.Delete(false) - if err != nil { - t.Fatal(err) - } - }() - - ep2, err := n.CreateEndpoint("ep2") - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep2.Delete(false) - if err != nil { - t.Fatal(err) - } - }() - - cnt, err := controller.NewSandbox(containerID, - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionUseExternalKey(), - libnetwork.OptionExtraHost("web", "192.168.0.1")) - defer func() { - if err := cnt.Delete(); err != nil { - t.Fatal(err) - } - osl.GC() - }() - - // Join endpoint to sandbox before SetKey - err = ep.Join(cnt) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep.Leave(cnt) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - }() - - sbox := ep.Info().Sandbox() - if sbox == nil { - t.Fatalf("Expected to have a valid Sandbox") - } - - if reexec { - err := reexecSetKey("this-must-fail", containerID, controller.ID()) - if err == nil { - t.Fatalf("SetExternalKey must fail if the corresponding namespace is not created") - } - } else { - // Setting an non-existing key (namespace) must fail - if err := sbox.SetKey("this-must-fail"); err == nil { - t.Fatalf("Setkey must fail if the corresponding namespace is not created") - } - } - - // Create a new OS sandbox using the osl API before using it in SetKey - if extOsBox, err := osl.NewSandbox("ValidKey", true); err != nil { - t.Fatalf("Failed to create new osl sandbox") - } else { - defer func() { - if err := extOsBox.Destroy(); err != nil { - log.Warnf("Failed to remove os sandbox: %v", err) - } - }() - } - - if reexec { - err := reexecSetKey("ValidKey", containerID, controller.ID()) - if err != nil { - t.Fatalf("SetExternalKey failed with %v", err) - } - } else { - if err := sbox.SetKey("ValidKey"); err != nil { - t.Fatalf("Setkey failed with %v", err) - } - } - - // Join endpoint to sandbox after SetKey - err = ep2.Join(sbox) - if err != nil { - t.Fatal(err) - } - runtime.LockOSThread() - defer func() { - err = ep2.Leave(sbox) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - }() - - if ep.Info().Sandbox().Key() != ep2.Info().Sandbox().Key() { - t.Fatalf("ep1 and ep2 returned different container sandbox key") - } - - checkSandbox(t, ep.Info()) -} - -func reexecSetKey(key string, containerID string, controllerID string) error { - var ( - state libcontainer.State - b []byte - err error - ) - - state.NamespacePaths = make(map[configs.NamespaceType]string) - state.NamespacePaths[configs.NamespaceType("NEWNET")] = key - if b, err = json.Marshal(state); err != nil { - return err - } - cmd := &exec.Cmd{ - Path: reexec.Self(), - Args: append([]string{"libnetwork-setkey"}, containerID, controllerID), - 
Stdin: strings.NewReader(string(b)), - Stdout: os.Stdout, - Stderr: os.Stderr, - } - return cmd.Run() -} - -func TestEndpointDeleteWithActiveContainer(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - }, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep.Delete(false) - if err != nil { - t.Fatal(err) - } - }() - - cnt, err := controller.NewSandbox(containerID, - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1")) - defer func() { - if err := cnt.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep.Join(cnt) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep.Leave(cnt) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - }() - - err = ep.Delete(false) - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if _, ok := err.(*libnetwork.ActiveContainerError); !ok { - t.Fatalf("Did not fail with expected error. Actual error: %v", err) - } -} - -func TestEndpointMultipleJoins(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - n, err := createTestNetwork(bridgeNetType, "testmultiple", options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testmultiple", - }, - }, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep.Delete(false); err != nil { - t.Fatal(err) - } - }() - - sbx1, err := controller.NewSandbox(containerID, - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1")) - defer func() { - if err := sbx1.Delete(); err != nil { - t.Fatal(err) - } - }() - - sbx2, err := controller.NewSandbox("c2") - defer func() { - if err := sbx2.Delete(); err != nil { - t.Fatal(err) - } - runtime.LockOSThread() - }() - - err = ep.Join(sbx1) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep.Leave(sbx1) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - }() - - err = ep.Join(sbx2) - if err == nil { - t.Fatal("Expected to fail multiple joins for the same endpoint") - } - - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("Failed with unexpected error type: %T. 
Desc: %s", err, err.Error()) - } - -} - -func TestLeaveAll(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - }, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - // If this goes through, it means cnt.Delete() effectively detached from all the endpoints - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep1, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - - ep2, err := n.CreateEndpoint("ep2") - if err != nil { - t.Fatal(err) - } - - cnt, err := controller.NewSandbox("leaveall") - if err != nil { - t.Fatal(err) - } - - err = ep1.Join(cnt) - if err != nil { - t.Fatalf("Failed to join ep1: %v", err) - } - runtime.LockOSThread() - - err = ep2.Join(cnt) - if err != nil { - t.Fatalf("Failed to join ep2: %v", err) - } - runtime.LockOSThread() - - err = cnt.Delete() - if err != nil { - t.Fatal(err) - } -} - -func TestontainerInvalidLeave(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - }, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := ep.Delete(false); err != nil { - t.Fatal(err) - } - }() - - cnt, err := controller.NewSandbox(containerID, - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1")) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := cnt.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep.Leave(cnt) - if err == nil { - t.Fatal("Expected to fail leave from an endpoint which has no active join") - } - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("Failed with unexpected error type: %T. 
Desc: %s", err, err.Error()) - } - - if err := ep.Leave(nil); err == nil { - t.Fatalf("Expected to fail leave nil Sandbox") - } - if _, ok := err.(types.BadRequestError); !ok { - t.Fatalf("Unexpected error type returned: %T", err) - } - - fsbx := &fakeSandbox{} - if err = ep.Leave(fsbx); err == nil { - t.Fatalf("Expected to fail leave with invalid Sandbox") - } - if _, ok := err.(types.BadRequestError); !ok { - t.Fatalf("Unexpected error type returned: %T", err) - } -} - -func TestEndpointUpdateParent(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - n, err := createTestNetwork("bridge", "testnetwork", options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - }, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep1, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - - ep2, err := n.CreateEndpoint("ep2") - if err != nil { - t.Fatal(err) - } - - sbx1, err := controller.NewSandbox(containerID, - libnetwork.OptionHostname("test"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionExtraHost("web", "192.168.0.1")) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sbx1.Delete(); err != nil { - t.Fatal(err) - } - }() - - sbx2, err := controller.NewSandbox("c2", - libnetwork.OptionHostname("test2"), - libnetwork.OptionDomainname("docker.io"), - libnetwork.OptionHostsPath("/var/lib/docker/test_network/container2/hosts"), - libnetwork.OptionExtraHost("web", "192.168.0.2")) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sbx2.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep1.Join(sbx1) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - - err = ep2.Join(sbx2) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } -} - -func TestEnableIPv6(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888\n") - expectedResolvConf := []byte("search pommesfrites.fr\nnameserver 127.0.0.11\noptions ndots:0\n") - //take a copy of resolv.conf for restoring after test completes - resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - t.Fatal(err) - } - //cleanup - defer func() { - if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - t.Fatal(err) - } - }() - - netOption := options.Generic{ - netlabel.EnableIPv6: true, - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - } - ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe99::/64", Gateway: "fe99::9"}} - - n, err := createTestNetwork("bridge", "testnetwork", netOption, nil, ipamV6ConfList) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep1, err := n.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { - t.Fatal(err) - } - - resolvConfPath := "/tmp/libnetwork_test/resolv.conf" - defer os.Remove(resolvConfPath) - - sb, err := controller.NewSandbox(containerID, libnetwork.OptionResolvConfPath(resolvConfPath)) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sb.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep1.Join(sb) - if err != nil { - 
t.Fatal(err) - } - - content, err := ioutil.ReadFile(resolvConfPath) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(content, expectedResolvConf) { - t.Fatalf("Expected:\n%s\nGot:\n%s", string(expectedResolvConf), string(content)) - } - - if err != nil { - t.Fatal(err) - } -} - -func TestResolvConfHost(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - tmpResolvConf := []byte("search localhost.net\nnameserver 127.0.0.1\nnameserver 2001:4860:4860::8888\n") - - //take a copy of resolv.conf for restoring after test completes - resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - t.Fatal(err) - } - //cleanup - defer func() { - if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - t.Fatal(err) - } - }() - - n, err := controller.NetworkByName("testhost") - if err != nil { - t.Fatal(err) - } - - ep1, err := n.CreateEndpoint("ep1", libnetwork.CreateOptionDisableResolution()) - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { - t.Fatal(err) - } - - resolvConfPath := "/tmp/libnetwork_test/resolv.conf" - defer os.Remove(resolvConfPath) - - sb, err := controller.NewSandbox(containerID, - libnetwork.OptionResolvConfPath(resolvConfPath), - libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sb.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep1.Join(sb) - if err != nil { - t.Fatal(err) - } - defer func() { - err = ep1.Leave(sb) - if err != nil { - t.Fatal(err) - } - }() - - finfo, err := os.Stat(resolvConfPath) - if err != nil { - t.Fatal(err) - } - - fmode := (os.FileMode)(0644) - if finfo.Mode() != fmode { - t.Fatalf("Expected file mode %s, got %s", fmode.String(), finfo.Mode().String()) - } - - content, err := ioutil.ReadFile(resolvConfPath) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(content, tmpResolvConf) { - t.Fatalf("Expected:\n%s\nGot:\n%s", string(tmpResolvConf), string(content)) - } -} - -func TestResolvConf(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - tmpResolvConf1 := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888\n") - tmpResolvConf2 := []byte("search pommesfrites.fr\nnameserver 112.34.56.78\nnameserver 2001:4860:4860::8888\n") - expectedResolvConf1 := []byte("search pommesfrites.fr\nnameserver 127.0.0.11\noptions ndots:0\n") - tmpResolvConf3 := []byte("search pommesfrites.fr\nnameserver 113.34.56.78\n") - - //take a copy of resolv.conf for restoring after test completes - resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - t.Fatal(err) - } - //cleanup - defer func() { - if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - t.Fatal(err) - } - }() - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork", - }, - } - n, err := createTestNetwork("bridge", "testnetwork", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() - - ep, err := n.CreateEndpoint("ep") - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf1, 0644); err != nil { - t.Fatal(err) - } - - resolvConfPath := "/tmp/libnetwork_test/resolv.conf" - defer os.Remove(resolvConfPath) - - sb1, err := 
controller.NewSandbox(containerID, libnetwork.OptionResolvConfPath(resolvConfPath)) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sb1.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep.Join(sb1) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - - finfo, err := os.Stat(resolvConfPath) - if err != nil { - t.Fatal(err) - } - - fmode := (os.FileMode)(0644) - if finfo.Mode() != fmode { - t.Fatalf("Expected file mode %s, got %s", fmode.String(), finfo.Mode().String()) - } - - content, err := ioutil.ReadFile(resolvConfPath) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(content, expectedResolvConf1) { - fmt.Printf("\n%v\n%v\n", expectedResolvConf1, content) - t.Fatalf("Expected:\n%s\nGot:\n%s", string(expectedResolvConf1), string(content)) - } - - err = ep.Leave(sb1) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf2, 0644); err != nil { - t.Fatal(err) - } - - sb2, err := controller.NewSandbox(containerID+"_2", libnetwork.OptionResolvConfPath(resolvConfPath)) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := sb2.Delete(); err != nil { - t.Fatal(err) - } - }() - - err = ep.Join(sb2) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - - content, err = ioutil.ReadFile(resolvConfPath) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(content, expectedResolvConf1) { - t.Fatalf("Expected:\n%s\nGot:\n%s", string(expectedResolvConf1), string(content)) - } - - if err := ioutil.WriteFile(resolvConfPath, tmpResolvConf3, 0644); err != nil { - t.Fatal(err) - } - - err = ep.Leave(sb2) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - - err = ep.Join(sb2) - runtime.LockOSThread() - if err != nil { - t.Fatal(err) - } - - content, err = ioutil.ReadFile(resolvConfPath) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(content, tmpResolvConf3) { - t.Fatalf("Expected:\n%s\nGot:\n%s", string(tmpResolvConf3), string(content)) - } -} - -func TestInvalidRemoteDriver(t *testing.T) { - if !testutils.IsRunningInContainer() { - t.Skip("Skipping test when not running inside a Container") - } - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - if server == nil { - t.Fatal("Failed to start a HTTP Server") - } - defer server.Close() - - type pluginRequest struct { - name string - } - - mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintln(w, `{"Implements": ["InvalidDriver"]}`) - }) - - if err := os.MkdirAll("/etc/docker/plugins", 0755); err != nil { - t.Fatal(err) - } - defer func() { - if err := os.RemoveAll("/etc/docker/plugins"); err != nil { - t.Fatal(err) - } - }() - - if err := ioutil.WriteFile("/etc/docker/plugins/invalid-network-driver.spec", []byte(server.URL), 0644); err != nil { - t.Fatal(err) - } - - ctrlr, err := libnetwork.New() - if err != nil { - t.Fatal(err) - } - defer ctrlr.Stop() - - _, err = ctrlr.NewNetwork("invalid-network-driver", "dummy", - libnetwork.NetworkOptionGeneric(getEmptyGenericOption())) - if err == nil { - t.Fatal("Expected to fail. But instead succeeded") - } - - if err != plugins.ErrNotImplements { - t.Fatalf("Did not fail with expected error. 
Actual error: %v", err) - } -} - -func TestValidRemoteDriver(t *testing.T) { - if !testutils.IsRunningInContainer() { - t.Skip("Skipping test when not running inside a Container") - } - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - if server == nil { - t.Fatal("Failed to start a HTTP Server") - } - defer server.Close() - - type pluginRequest struct { - name string - } - - mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, `{"Implements": ["%s"]}`, driverapi.NetworkPluginEndpointType) - }) - mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, "null") - }) - - if err := os.MkdirAll("/etc/docker/plugins", 0755); err != nil { - t.Fatal(err) - } - defer func() { - if err := os.RemoveAll("/etc/docker/plugins"); err != nil { - t.Fatal(err) - } - }() - - if err := ioutil.WriteFile("/etc/docker/plugins/valid-network-driver.spec", []byte(server.URL), 0644); err != nil { - t.Fatal(err) - } - - n, err := controller.NewNetwork("valid-network-driver", "dummy", - libnetwork.NetworkOptionGeneric(getEmptyGenericOption())) - if err != nil { - // Only fail if we could not find the plugin driver - if _, ok := err.(types.NotFoundError); ok { - t.Fatal(err) - } - return - } - defer func() { - if err := n.Delete(); err != nil { - t.Fatal(err) - } - }() -} - -var ( - once sync.Once - start = make(chan struct{}) - done = make(chan chan struct{}, numThreads-1) - origns = netns.None() - testns = netns.None() - sboxes = make([]libnetwork.Sandbox, numThreads) -) - -const ( - iterCnt = 25 - numThreads = 3 - first = 1 - last = numThreads - debug = false -) - -func createGlobalInstance(t *testing.T) { - var err error - defer close(start) - - origns, err = netns.Get() - if err != nil { - t.Fatal(err) - } - - if testutils.IsRunningInContainer() { - testns = origns - } else { - testns, err = netns.New() - if err != nil { - t.Fatal(err) - } - } - - netOption := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": "network", - }, - } - - net1, err := controller.NetworkByName("testhost") - if err != nil { - t.Fatal(err) - } - - net2, err := createTestNetwork("bridge", "network2", netOption, nil, nil) - if err != nil { - t.Fatal(err) - } - - _, err = net1.CreateEndpoint("pep1") - if err != nil { - t.Fatal(err) - } - - _, err = net2.CreateEndpoint("pep2") - if err != nil { - t.Fatal(err) - } - - _, err = net2.CreateEndpoint("pep3") - if err != nil { - t.Fatal(err) - } - - if sboxes[first-1], err = controller.NewSandbox(fmt.Sprintf("%drace", first), libnetwork.OptionUseDefaultSandbox()); err != nil { - t.Fatal(err) - } - for thd := first + 1; thd <= last; thd++ { - if sboxes[thd-1], err = controller.NewSandbox(fmt.Sprintf("%drace", thd)); err != nil { - t.Fatal(err) - } - } -} - -func debugf(format string, a ...interface{}) (int, error) { - if debug { - return fmt.Printf(format, a...) 
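// Coordination pattern used by runParallelTests below, shown in isolation
// (worker counts are illustrative; the real test lets the last worker close
// the done channel instead of counting receives):
start := make(chan struct{})        // closed once global setup is complete
done := make(chan chan struct{}, 2) // late workers park a completion channel here
for id := 2; id <= 3; id++ {
	go func(id int) {
		<-start // block until the first worker finishes createGlobalInstance
		thr := make(chan struct{})
		done <- thr
		defer close(thr) // signal this worker's completion
		// ... the join/leave iterations would run here ...
	}(id)
}
close(start) // first worker: setup done, release the rest
for i := 0; i < 2; i++ {
	<-<-done // wait on each worker's completion channel in turn
}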
- } - - return 0, nil -} - -func parallelJoin(t *testing.T, rc libnetwork.Sandbox, ep libnetwork.Endpoint, thrNumber int) { - debugf("J%d.", thrNumber) - var err error - - sb := sboxes[thrNumber-1] - err = ep.Join(sb) - - runtime.LockOSThread() - if err != nil { - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("thread %d: %v", thrNumber, err) - } - debugf("JE%d(%v).", thrNumber, err) - } - debugf("JD%d.", thrNumber) -} - -func parallelLeave(t *testing.T, rc libnetwork.Sandbox, ep libnetwork.Endpoint, thrNumber int) { - debugf("L%d.", thrNumber) - var err error - - sb := sboxes[thrNumber-1] - - err = ep.Leave(sb) - runtime.LockOSThread() - if err != nil { - if _, ok := err.(types.ForbiddenError); !ok { - t.Fatalf("thread %d: %v", thrNumber, err) - } - debugf("LE%d(%v).", thrNumber, err) - } - debugf("LD%d.", thrNumber) -} - -func runParallelTests(t *testing.T, thrNumber int) { - var ( - ep libnetwork.Endpoint - sb libnetwork.Sandbox - err error - ) - - t.Parallel() - - pTest := flag.Lookup("test.parallel") - if pTest == nil { - t.Skip("Skipped because test.parallel flag not set;") - } - numParallel, err := strconv.Atoi(pTest.Value.String()) - if err != nil { - t.Fatal(err) - } - if numParallel < numThreads { - t.Skip("Skipped because t.parallel was less than ", numThreads) - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - if thrNumber == first { - createGlobalInstance(t) - } - - if thrNumber != first { - select { - case <-start: - } - - thrdone := make(chan struct{}) - done <- thrdone - defer close(thrdone) - - if thrNumber == last { - defer close(done) - } - - err = netns.Set(testns) - if err != nil { - t.Fatal(err) - } - } - defer netns.Set(origns) - - net1, err := controller.NetworkByName("testhost") - if err != nil { - t.Fatal(err) - } - if net1 == nil { - t.Fatal("Could not find testhost") - } - - net2, err := controller.NetworkByName("network2") - if err != nil { - t.Fatal(err) - } - if net2 == nil { - t.Fatal("Could not find network2") - } - - epName := fmt.Sprintf("pep%d", thrNumber) - - if thrNumber == first { - ep, err = net1.EndpointByName(epName) - } else { - ep, err = net2.EndpointByName(epName) - } - - if err != nil { - t.Fatal(err) - } - if ep == nil { - t.Fatal("Got nil ep with no error") - } - - cid := fmt.Sprintf("%drace", thrNumber) - controller.WalkSandboxes(libnetwork.SandboxContainerWalker(&sb, cid)) - if sb == nil { - t.Fatalf("Got nil sandbox for container: %s", cid) - } - - for i := 0; i < iterCnt; i++ { - parallelJoin(t, sb, ep, thrNumber) - parallelLeave(t, sb, ep, thrNumber) - } - - debugf("\n") - - err = sb.Delete() - if err != nil { - t.Fatal(err) - } - if thrNumber == first { - for thrdone := range done { - select { - case <-thrdone: - } - } - - testns.Close() - if err := net2.Delete(); err != nil { - t.Fatal(err) - } - } else { - err = ep.Delete(false) - if err != nil { - t.Fatal(err) - } - } -} - -func TestParallel1(t *testing.T) { - runParallelTests(t, 1) -} - -func TestParallel2(t *testing.T) { - runParallelTests(t, 2) -} - -func TestParallel3(t *testing.T) { - runParallelTests(t, 3) -} diff --git a/vendor/github.com/docker/libnetwork/network.go b/vendor/github.com/docker/libnetwork/network.go deleted file mode 100644 index 7449c90a..00000000 --- a/vendor/github.com/docker/libnetwork/network.go +++ /dev/null @@ -1,1224 +0,0 @@ -package libnetwork - -import ( - "encoding/json" - "fmt" - "net" - "strconv" - "strings" - "sync" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/stringid" - 
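// Typical client flow through the interfaces defined in this file, assembled
// from the calls visible in the tests above (driver name, network name, and
// options are illustrative, not prescribed here):
ctrlr, err := libnetwork.New()
if err != nil {
	log.Fatal(err)
}
defer ctrlr.Stop()
n, err := ctrlr.NewNetwork("bridge", "mynet",
	libnetwork.NetworkOptionGeneric(options.Generic{
		netlabel.GenericData: options.Generic{"BridgeName": "mynet"},
	}))
if err != nil {
	log.Fatal(err)
}
ep, err := n.CreateEndpoint("web") // draws an address from the network's IPAM pool
if err != nil {
	log.Fatal(err)
}
sb, err := ctrlr.NewSandbox("my-container-id")
if err != nil {
	log.Fatal(err)
}
if err := ep.Join(sb); err != nil { // plumbs the interface into the sandbox's namespace
	log.Fatal(err)
}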
"github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/driverapi" - "github.com/docker/libnetwork/etchosts" - "github.com/docker/libnetwork/ipamapi" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/netutils" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/types" -) - -// A Network represents a logical connectivity zone that containers may -// join using the Link method. A Network is managed by a specific driver. -type Network interface { - // A user chosen name for this network. - Name() string - - // A system generated id for this network. - ID() string - - // The type of network, which corresponds to its managing driver. - Type() string - - // Create a new endpoint to this network symbolically identified by the - // specified unique name. The options parameter carry driver specific options. - CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) - - // Delete the network. - Delete() error - - // Endpoints returns the list of Endpoint(s) in this network. - Endpoints() []Endpoint - - // WalkEndpoints uses the provided function to walk the Endpoints - WalkEndpoints(walker EndpointWalker) - - // EndpointByName returns the Endpoint which has the passed name. If not found, the error ErrNoSuchEndpoint is returned. - EndpointByName(name string) (Endpoint, error) - - // EndpointByID returns the Endpoint which has the passed id. If not found, the error ErrNoSuchEndpoint is returned. - EndpointByID(id string) (Endpoint, error) - - // Return certain operational data belonging to this network - Info() NetworkInfo -} - -// NetworkInfo returns some configuration and operational information about the network -type NetworkInfo interface { - IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) - IpamInfo() ([]*IpamInfo, []*IpamInfo) - DriverOptions() map[string]string - Scope() string - Internal() bool -} - -// EndpointWalker is a client provided function which will be used to walk the Endpoints. -// When the function returns true, the walk will stop. -type EndpointWalker func(ep Endpoint) bool - -type svcInfo struct { - svcMap map[string][]net.IP - ipMap map[string]string -} - -// IpamConf contains all the ipam related configurations for a network -type IpamConf struct { - // The master address pool for containers and network interfaces - PreferredPool string - // A subset of the master pool. If specified, - // this becomes the container pool - SubPool string - // Preferred Network Gateway address (optional) - Gateway string - // Auxiliary addresses for network driver. Must be within the master pool. 
- // libnetwork will reserve them if they fall into the container pool - AuxAddresses map[string]string -} - -// Validate checks whether the configuration is valid -func (c *IpamConf) Validate() error { - if c.Gateway != "" && nil == net.ParseIP(c.Gateway) { - return types.BadRequestErrorf("invalid gateway address %s in Ipam configuration", c.Gateway) - } - return nil -} - -// IpamInfo contains all the ipam related operational info for a network -type IpamInfo struct { - PoolID string - Meta map[string]string - driverapi.IPAMData -} - -// MarshalJSON encodes IpamInfo into json message -func (i *IpamInfo) MarshalJSON() ([]byte, error) { - m := map[string]interface{}{ - "PoolID": i.PoolID, - } - v, err := json.Marshal(&i.IPAMData) - if err != nil { - return nil, err - } - m["IPAMData"] = string(v) - - if i.Meta != nil { - m["Meta"] = i.Meta - } - return json.Marshal(m) -} - -// UnmarshalJSON decodes json message into PoolData -func (i *IpamInfo) UnmarshalJSON(data []byte) error { - var ( - m map[string]interface{} - err error - ) - if err = json.Unmarshal(data, &m); err != nil { - return err - } - i.PoolID = m["PoolID"].(string) - if v, ok := m["Meta"]; ok { - b, _ := json.Marshal(v) - if err = json.Unmarshal(b, &i.Meta); err != nil { - return err - } - } - if v, ok := m["IPAMData"]; ok { - if err = json.Unmarshal([]byte(v.(string)), &i.IPAMData); err != nil { - return err - } - } - return nil -} - -type network struct { - ctrlr *controller - name string - networkType string - id string - ipamType string - ipamOptions map[string]string - addrSpace string - ipamV4Config []*IpamConf - ipamV6Config []*IpamConf - ipamV4Info []*IpamInfo - ipamV6Info []*IpamInfo - enableIPv6 bool - postIPv6 bool - epCnt *endpointCnt - generic options.Generic - dbIndex uint64 - dbExists bool - persist bool - stopWatchCh chan struct{} - drvOnce *sync.Once - internal bool - sync.Mutex -} - -func (n *network) Name() string { - n.Lock() - defer n.Unlock() - - return n.name -} - -func (n *network) ID() string { - n.Lock() - defer n.Unlock() - - return n.id -} - -func (n *network) Type() string { - n.Lock() - defer n.Unlock() - - return n.networkType -} - -func (n *network) Key() []string { - n.Lock() - defer n.Unlock() - return []string{datastore.NetworkKeyPrefix, n.id} -} - -func (n *network) KeyPrefix() []string { - return []string{datastore.NetworkKeyPrefix} -} - -func (n *network) Value() []byte { - n.Lock() - defer n.Unlock() - b, err := json.Marshal(n) - if err != nil { - return nil - } - return b -} - -func (n *network) SetValue(value []byte) error { - return json.Unmarshal(value, n) -} - -func (n *network) Index() uint64 { - n.Lock() - defer n.Unlock() - return n.dbIndex -} - -func (n *network) SetIndex(index uint64) { - n.Lock() - n.dbIndex = index - n.dbExists = true - n.Unlock() -} - -func (n *network) Exists() bool { - n.Lock() - defer n.Unlock() - return n.dbExists -} - -func (n *network) Skip() bool { - n.Lock() - defer n.Unlock() - return !n.persist -} - -func (n *network) New() datastore.KVObject { - n.Lock() - defer n.Unlock() - - return &network{ - ctrlr: n.ctrlr, - drvOnce: &sync.Once{}, - } -} - -// CopyTo deep copies to the destination IpamConfig -func (c *IpamConf) CopyTo(dstC *IpamConf) error { - dstC.PreferredPool = c.PreferredPool - dstC.SubPool = c.SubPool - dstC.Gateway = c.Gateway - if c.AuxAddresses != nil { - dstC.AuxAddresses = make(map[string]string, len(c.AuxAddresses)) - for k, v := range c.AuxAddresses { - dstC.AuxAddresses[k] = v - } - } - return nil -} - -// CopyTo deep copies to the 
destination IpamInfo -func (i *IpamInfo) CopyTo(dstI *IpamInfo) error { - dstI.PoolID = i.PoolID - if i.Meta != nil { - dstI.Meta = make(map[string]string) - for k, v := range i.Meta { - dstI.Meta[k] = v - } - } - - dstI.AddressSpace = i.AddressSpace - dstI.Pool = types.GetIPNetCopy(i.Pool) - dstI.Gateway = types.GetIPNetCopy(i.Gateway) - - if i.AuxAddresses != nil { - dstI.AuxAddresses = make(map[string]*net.IPNet) - for k, v := range i.AuxAddresses { - dstI.AuxAddresses[k] = types.GetIPNetCopy(v) - } - } - - return nil -} - -func (n *network) CopyTo(o datastore.KVObject) error { - n.Lock() - defer n.Unlock() - - dstN := o.(*network) - dstN.name = n.name - dstN.id = n.id - dstN.networkType = n.networkType - dstN.ipamType = n.ipamType - dstN.enableIPv6 = n.enableIPv6 - dstN.persist = n.persist - dstN.postIPv6 = n.postIPv6 - dstN.dbIndex = n.dbIndex - dstN.dbExists = n.dbExists - dstN.drvOnce = n.drvOnce - dstN.internal = n.internal - - for _, v4conf := range n.ipamV4Config { - dstV4Conf := &IpamConf{} - v4conf.CopyTo(dstV4Conf) - dstN.ipamV4Config = append(dstN.ipamV4Config, dstV4Conf) - } - - for _, v4info := range n.ipamV4Info { - dstV4Info := &IpamInfo{} - v4info.CopyTo(dstV4Info) - dstN.ipamV4Info = append(dstN.ipamV4Info, dstV4Info) - } - - for _, v6conf := range n.ipamV6Config { - dstV6Conf := &IpamConf{} - v6conf.CopyTo(dstV6Conf) - dstN.ipamV6Config = append(dstN.ipamV6Config, dstV6Conf) - } - - for _, v6info := range n.ipamV6Info { - dstV6Info := &IpamInfo{} - v6info.CopyTo(dstV6Info) - dstN.ipamV6Info = append(dstN.ipamV6Info, dstV6Info) - } - - dstN.generic = options.Generic{} - for k, v := range n.generic { - dstN.generic[k] = v - } - - return nil -} - -func (n *network) DataScope() string { - return n.driverScope() -} - -func (n *network) getEpCnt() *endpointCnt { - n.Lock() - defer n.Unlock() - - return n.epCnt -} - -// TODO : Can be made much more generic with the help of reflection (but has some golang limitations) -func (n *network) MarshalJSON() ([]byte, error) { - netMap := make(map[string]interface{}) - netMap["name"] = n.name - netMap["id"] = n.id - netMap["networkType"] = n.networkType - netMap["ipamType"] = n.ipamType - netMap["addrSpace"] = n.addrSpace - netMap["enableIPv6"] = n.enableIPv6 - if n.generic != nil { - netMap["generic"] = n.generic - } - netMap["persist"] = n.persist - netMap["postIPv6"] = n.postIPv6 - if len(n.ipamV4Config) > 0 { - ics, err := json.Marshal(n.ipamV4Config) - if err != nil { - return nil, err - } - netMap["ipamV4Config"] = string(ics) - } - if len(n.ipamV4Info) > 0 { - iis, err := json.Marshal(n.ipamV4Info) - if err != nil { - return nil, err - } - netMap["ipamV4Info"] = string(iis) - } - if len(n.ipamV6Config) > 0 { - ics, err := json.Marshal(n.ipamV6Config) - if err != nil { - return nil, err - } - netMap["ipamV6Config"] = string(ics) - } - if len(n.ipamV6Info) > 0 { - iis, err := json.Marshal(n.ipamV6Info) - if err != nil { - return nil, err - } - netMap["ipamV6Info"] = string(iis) - } - netMap["internal"] = n.internal - return json.Marshal(netMap) -} - -// TODO : Can be made much more generic with the help of reflection (but has some golang limitations) -func (n *network) UnmarshalJSON(b []byte) (err error) { - var netMap map[string]interface{} - if err := json.Unmarshal(b, &netMap); err != nil { - return err - } - n.name = netMap["name"].(string) - n.id = netMap["id"].(string) - n.networkType = netMap["networkType"].(string) - n.enableIPv6 = netMap["enableIPv6"].(bool) - - if v, ok := netMap["generic"]; ok { - n.generic = 
v.(map[string]interface{}) - // Restore opts in their map[string]string form - if v, ok := n.generic[netlabel.GenericData]; ok { - var lmap map[string]string - ba, err := json.Marshal(v) - if err != nil { - return err - } - if err := json.Unmarshal(ba, &lmap); err != nil { - return err - } - n.generic[netlabel.GenericData] = lmap - } - } - if v, ok := netMap["persist"]; ok { - n.persist = v.(bool) - } - if v, ok := netMap["postIPv6"]; ok { - n.postIPv6 = v.(bool) - } - if v, ok := netMap["ipamType"]; ok { - n.ipamType = v.(string) - } else { - n.ipamType = ipamapi.DefaultIPAM - } - if v, ok := netMap["addrSpace"]; ok { - n.addrSpace = v.(string) - } - if v, ok := netMap["ipamV4Config"]; ok { - if err := json.Unmarshal([]byte(v.(string)), &n.ipamV4Config); err != nil { - return err - } - } - if v, ok := netMap["ipamV4Info"]; ok { - if err := json.Unmarshal([]byte(v.(string)), &n.ipamV4Info); err != nil { - return err - } - } - if v, ok := netMap["ipamV6Config"]; ok { - if err := json.Unmarshal([]byte(v.(string)), &n.ipamV6Config); err != nil { - return err - } - } - if v, ok := netMap["ipamV6Info"]; ok { - if err := json.Unmarshal([]byte(v.(string)), &n.ipamV6Info); err != nil { - return err - } - } - if v, ok := netMap["internal"]; ok { - n.internal = v.(bool) - } - return nil -} - -// NetworkOption is a option setter function type used to pass varios options to -// NewNetwork method. The various setter functions of type NetworkOption are -// provided by libnetwork, they look like NetworkOptionXXXX(...) -type NetworkOption func(n *network) - -// NetworkOptionGeneric function returns an option setter for a Generic option defined -// in a Dictionary of Key-Value pair -func NetworkOptionGeneric(generic map[string]interface{}) NetworkOption { - return func(n *network) { - n.generic = generic - if _, ok := generic[netlabel.EnableIPv6]; ok { - n.enableIPv6 = generic[netlabel.EnableIPv6].(bool) - } - } -} - -// NetworkOptionPersist returns an option setter to set persistence policy for a network -func NetworkOptionPersist(persist bool) NetworkOption { - return func(n *network) { - n.persist = persist - } -} - -// NetworkOptionInternalNetwork returns an option setter to config the network -// to be internal which disables default gateway service -func NetworkOptionInternalNetwork() NetworkOption { - return func(n *network) { - n.internal = true - if n.generic == nil { - n.generic = make(map[string]interface{}) - } - n.generic[netlabel.Internal] = true - } -} - -// NetworkOptionIpam function returns an option setter for the ipam configuration for this network -func NetworkOptionIpam(ipamDriver string, addrSpace string, ipV4 []*IpamConf, ipV6 []*IpamConf, opts map[string]string) NetworkOption { - return func(n *network) { - if ipamDriver != "" { - n.ipamType = ipamDriver - } - n.ipamOptions = opts - n.addrSpace = addrSpace - n.ipamV4Config = ipV4 - n.ipamV6Config = ipV6 - } -} - -// NetworkOptionDriverOpts function returns an option setter for any parameter described by a map -func NetworkOptionDriverOpts(opts map[string]string) NetworkOption { - return func(n *network) { - if n.generic == nil { - n.generic = make(map[string]interface{}) - } - if opts == nil { - opts = make(map[string]string) - } - // Store the options - n.generic[netlabel.GenericData] = opts - // Decode and store the endpoint options of libnetwork interest - if val, ok := opts[netlabel.EnableIPv6]; ok { - var err error - if n.enableIPv6, err = strconv.ParseBool(val); err != nil { - log.Warnf("Failed to parse %s' value: %s (%s)", 
netlabel.EnableIPv6, val, err.Error()) - } - } - } -} - -// NetworkOptionDeferIPv6Alloc instructs the network to defer the IPV6 address allocation until after the endpoint has been created -// It is being provided to support the specific docker daemon flags where user can deterministically assign an IPv6 address -// to a container as combination of fixed-cidr-v6 + mac-address -// TODO: Remove this option setter once we support endpoint ipam options -func NetworkOptionDeferIPv6Alloc(enable bool) NetworkOption { - return func(n *network) { - n.postIPv6 = enable - } -} - -func (n *network) processOptions(options ...NetworkOption) { - for _, opt := range options { - if opt != nil { - opt(n) - } - } -} - -func (n *network) driverScope() string { - c := n.getController() - - c.Lock() - // Check if a driver for the specified network type is available - dd, ok := c.drivers[n.networkType] - c.Unlock() - - if !ok { - var err error - dd, err = c.loadDriver(n.networkType) - if err != nil { - // If driver could not be resolved simply return an empty string - return "" - } - } - - return dd.capability.DataScope -} - -func (n *network) driver() (driverapi.Driver, error) { - c := n.getController() - - c.Lock() - // Check if a driver for the specified network type is available - dd, ok := c.drivers[n.networkType] - c.Unlock() - - if !ok { - var err error - dd, err = c.loadDriver(n.networkType) - if err != nil { - return nil, err - } - } - - return dd.driver, nil -} - -func (n *network) Delete() error { - n.Lock() - c := n.ctrlr - name := n.name - id := n.id - n.Unlock() - - n, err := c.getNetworkFromStore(id) - if err != nil { - return &UnknownNetworkError{name: name, id: id} - } - - numEps := n.getEpCnt().EndpointCnt() - if numEps != 0 { - return &ActiveEndpointsError{name: n.name, id: n.id} - } - - if err = n.deleteNetwork(); err != nil { - return err - } - defer func() { - if err != nil { - if e := c.addNetwork(n); e != nil { - log.Warnf("failed to rollback deleteNetwork for network %s: %v", - n.Name(), err) - } - } - }() - - // deleteFromStore performs an atomic delete operation and the - // network.epCnt will help prevent any possible - // race between endpoint join and network delete - if err = n.getController().deleteFromStore(n.getEpCnt()); err != nil { - return fmt.Errorf("error deleting network endpoint count from store: %v", err) - } - - n.ipamRelease() - - if err = n.getController().deleteFromStore(n); err != nil { - return fmt.Errorf("error deleting network from store: %v", err) - } - - return nil -} - -func (n *network) deleteNetwork() error { - d, err := n.driver() - if err != nil { - return fmt.Errorf("failed deleting network: %v", err) - } - - if err := d.DeleteNetwork(n.ID()); err != nil { - // Forbidden Errors should be honored - if _, ok := err.(types.ForbiddenError); ok { - return err - } - - if _, ok := err.(types.MaskableError); !ok { - log.Warnf("driver error deleting network %s : %v", n.name, err) - } - } - - return nil -} - -func (n *network) addEndpoint(ep *endpoint) error { - d, err := n.driver() - if err != nil { - return fmt.Errorf("failed to add endpoint: %v", err) - } - - err = d.CreateEndpoint(n.id, ep.id, ep.Interface(), ep.generic) - if err != nil { - return types.InternalErrorf("failed to create endpoint %s on network %s: %v", - ep.Name(), n.Name(), err) - } - - return nil -} - -func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) { - var err error - if !config.IsValidName(name) { - return nil, ErrInvalidName(name) - } - - if _, err 
= n.EndpointByName(name); err == nil { - return nil, types.ForbiddenErrorf("service endpoint with name %s already exists", name) - } - - ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}} - ep.id = stringid.GenerateRandomID() - - // Initialize ep.network with a possibly stale copy of n. We need this to get network from - // store. But once we get it from store we will have the most uptodate copy possible. - ep.network = n - ep.locator = n.getController().clusterHostID() - ep.network, err = ep.getNetworkFromStore() - if err != nil { - return nil, fmt.Errorf("failed to get network during CreateEndpoint: %v", err) - } - n = ep.network - - ep.processOptions(options...) - - if opt, ok := ep.generic[netlabel.MacAddress]; ok { - if mac, ok := opt.(net.HardwareAddr); ok { - ep.iface.mac = mac - } - } - - ipam, err := n.getController().getIPAM(n.ipamType) - if err != nil { - return nil, err - } - - if ipam.capability.RequiresMACAddress { - if ep.iface.mac == nil { - ep.iface.mac = netutils.GenerateRandomMAC() - } - if ep.ipamOptions == nil { - ep.ipamOptions = make(map[string]string) - } - ep.ipamOptions[netlabel.MacAddress] = ep.iface.mac.String() - } - - if err = ep.assignAddress(ipam.driver, true, !n.postIPv6); err != nil { - return nil, err - } - defer func() { - if err != nil { - ep.releaseAddress() - } - }() - - if err = n.addEndpoint(ep); err != nil { - return nil, err - } - defer func() { - if err != nil { - if e := ep.deleteEndpoint(); e != nil { - log.Warnf("cleaning up endpoint failed %s : %v", name, e) - } - } - }() - - if err = ep.assignAddress(ipam.driver, false, n.postIPv6); err != nil { - return nil, err - } - - if err = n.getController().updateToStore(ep); err != nil { - return nil, err - } - defer func() { - if err != nil { - if e := n.getController().deleteFromStore(ep); e != nil { - log.Warnf("error rolling back endpoint %s from store: %v", name, e) - } - } - }() - - // Watch for service records - n.getController().watchSvcRecord(ep) - defer func() { - if err != nil { - n.getController().unWatchSvcRecord(ep) - } - }() - - // Increment endpoint count to indicate completion of endpoint addition - if err = n.getEpCnt().IncEndpointCnt(); err != nil { - return nil, err - } - - return ep, nil -} - -func (n *network) Endpoints() []Endpoint { - var list []Endpoint - - endpoints, err := n.getEndpointsFromStore() - if err != nil { - log.Error(err) - } - - for _, ep := range endpoints { - list = append(list, ep) - } - - return list -} - -func (n *network) WalkEndpoints(walker EndpointWalker) { - for _, e := range n.Endpoints() { - if walker(e) { - return - } - } -} - -func (n *network) EndpointByName(name string) (Endpoint, error) { - if name == "" { - return nil, ErrInvalidName(name) - } - var e Endpoint - - s := func(current Endpoint) bool { - if current.Name() == name { - e = current - return true - } - return false - } - - n.WalkEndpoints(s) - - if e == nil { - return nil, ErrNoSuchEndpoint(name) - } - - return e, nil -} - -func (n *network) EndpointByID(id string) (Endpoint, error) { - if id == "" { - return nil, ErrInvalidID(id) - } - - ep, err := n.getEndpointFromStore(id) - if err != nil { - return nil, ErrNoSuchEndpoint(id) - } - - return ep, nil -} - -func (n *network) updateSvcRecord(ep *endpoint, localEps []*endpoint, isAdd bool) { - epName := ep.Name() - if iface := ep.Iface(); iface.Address() != nil { - myAliases := ep.MyAliases() - if isAdd { - if !ep.isAnonymous() { - n.addSvcRecords(epName, iface.Address().IP, true) - } - for _, 
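// Shape of the bookkeeping this walk feeds (addresses illustrative): svcMap
// answers name lookups, ipMap answers reverse lookups, which is why
// addSvcRecords below keys it with netutils.ReverseIP ("172.17.0.2" becomes
// "2.0.17.172" for PTR queries under .in-addr.arpa.).
sketch := svcInfo{
	svcMap: map[string][]net.IP{"web": {net.ParseIP("172.17.0.2")}},
	ipMap:  map[string]string{"2.0.17.172": "web"},
}
_ = sketch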
alias := range myAliases { - n.addSvcRecords(alias, iface.Address().IP, false) - } - } else { - if !ep.isAnonymous() { - n.deleteSvcRecords(epName, iface.Address().IP, true) - } - for _, alias := range myAliases { - n.deleteSvcRecords(alias, iface.Address().IP, false) - } - } - } -} - -func (n *network) addSvcRecords(name string, epIP net.IP, ipMapUpdate bool) { - c := n.getController() - c.Lock() - defer c.Unlock() - sr, ok := c.svcDb[n.ID()] - if !ok { - sr = svcInfo{ - svcMap: make(map[string][]net.IP), - ipMap: make(map[string]string), - } - c.svcDb[n.ID()] = sr - } - - if ipMapUpdate { - reverseIP := netutils.ReverseIP(epIP.String()) - if _, ok := sr.ipMap[reverseIP]; !ok { - sr.ipMap[reverseIP] = name - } - } - - ipList := sr.svcMap[name] - for _, ip := range ipList { - if ip.Equal(epIP) { - return - } - } - sr.svcMap[name] = append(sr.svcMap[name], epIP) -} - -func (n *network) deleteSvcRecords(name string, epIP net.IP, ipMapUpdate bool) { - c := n.getController() - c.Lock() - defer c.Unlock() - sr, ok := c.svcDb[n.ID()] - if !ok { - return - } - - if ipMapUpdate { - delete(sr.ipMap, netutils.ReverseIP(epIP.String())) - } - - ipList := sr.svcMap[name] - for i, ip := range ipList { - if ip.Equal(epIP) { - ipList = append(ipList[:i], ipList[i+1:]...) - break - } - } - sr.svcMap[name] = ipList - - if len(ipList) == 0 { - delete(sr.svcMap, name) - } -} - -func (n *network) getSvcRecords(ep *endpoint) []etchosts.Record { - n.Lock() - defer n.Unlock() - - var recs []etchosts.Record - sr, _ := n.ctrlr.svcDb[n.id] - - for h, ip := range sr.svcMap { - if ep != nil && strings.Split(h, ".")[0] == ep.Name() { - continue - } - - recs = append(recs, etchosts.Record{ - Hosts: h, - IP: ip[0].String(), - }) - } - - return recs -} - -func (n *network) getController() *controller { - n.Lock() - defer n.Unlock() - return n.ctrlr -} - -func (n *network) ipamAllocate() error { - // For now also exclude bridge from using new ipam - if n.Type() == "host" || n.Type() == "null" { - return nil - } - - ipam, err := n.getController().getIpamDriver(n.ipamType) - if err != nil { - return err - } - - if n.addrSpace == "" { - if n.addrSpace, err = n.deriveAddressSpace(); err != nil { - return err - } - } - - err = n.ipamAllocateVersion(4, ipam) - if err != nil { - return err - } - - defer func() { - if err != nil { - n.ipamReleaseVersion(4, ipam) - } - }() - - return n.ipamAllocateVersion(6, ipam) -} - -func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error { - var ( - cfgList *[]*IpamConf - infoList *[]*IpamInfo - err error - ) - - switch ipVer { - case 4: - cfgList = &n.ipamV4Config - infoList = &n.ipamV4Info - case 6: - cfgList = &n.ipamV6Config - infoList = &n.ipamV6Info - default: - return types.InternalErrorf("incorrect ip version passed to ipam allocate: %d", ipVer) - } - - if len(*cfgList) == 0 { - if ipVer == 6 { - return nil - } - *cfgList = []*IpamConf{&IpamConf{}} - } - - *infoList = make([]*IpamInfo, len(*cfgList)) - - log.Debugf("Allocating IPv%d pools for network %s (%s)", ipVer, n.Name(), n.ID()) - - for i, cfg := range *cfgList { - if err = cfg.Validate(); err != nil { - return err - } - d := &IpamInfo{} - (*infoList)[i] = d - - d.PoolID, d.Pool, d.Meta, err = ipam.RequestPool(n.addrSpace, cfg.PreferredPool, cfg.SubPool, n.ipamOptions, ipVer == 6) - if err != nil { - return err - } - - defer func() { - if err != nil { - if err := ipam.ReleasePool(d.PoolID); err != nil { - log.Warnf("Failed to release address pool %s after failure to create network %s (%s)", d.PoolID, n.Name(), 
n.ID()) - } - } - }() - - if gws, ok := d.Meta[netlabel.Gateway]; ok { - if d.Gateway, err = types.ParseCIDR(gws); err != nil { - return types.BadRequestErrorf("failed to parse gateway address (%v) returned by ipam driver: %v", gws, err) - } - } - - // If user requested a specific gateway, libnetwork will allocate it - // irrespective of whether ipam driver returned a gateway already. - // If none of the above is true, libnetwork will allocate one. - if cfg.Gateway != "" || d.Gateway == nil { - var gatewayOpts = map[string]string{ - ipamapi.RequestAddressType: netlabel.Gateway, - } - if d.Gateway, _, err = ipam.RequestAddress(d.PoolID, net.ParseIP(cfg.Gateway), gatewayOpts); err != nil { - return types.InternalErrorf("failed to allocate gateway (%v): %v", cfg.Gateway, err) - } - } - - // Auxiliary addresses must be part of the master address pool - // If they fall into the container addressable pool, libnetwork will reserve them - if cfg.AuxAddresses != nil { - var ip net.IP - d.IPAMData.AuxAddresses = make(map[string]*net.IPNet, len(cfg.AuxAddresses)) - for k, v := range cfg.AuxAddresses { - if ip = net.ParseIP(v); ip == nil { - return types.BadRequestErrorf("non parsable secondary ip address (%s:%s) passed for network %s", k, v, n.Name()) - } - if !d.Pool.Contains(ip) { - return types.ForbiddenErrorf("auxilairy address: (%s:%s) must belong to the master pool: %s", k, v, d.Pool) - } - // Attempt reservation in the container addressable pool, silent the error if address does not belong to that pool - if d.IPAMData.AuxAddresses[k], _, err = ipam.RequestAddress(d.PoolID, ip, nil); err != nil && err != ipamapi.ErrIPOutOfRange { - return types.InternalErrorf("failed to allocate secondary ip address (%s:%s): %v", k, v, err) - } - } - } - } - - return nil -} - -func (n *network) ipamRelease() { - // For now exclude host and null - if n.Type() == "host" || n.Type() == "null" { - return - } - ipam, err := n.getController().getIpamDriver(n.ipamType) - if err != nil { - log.Warnf("Failed to retrieve ipam driver to release address pool(s) on delete of network %s (%s): %v", n.Name(), n.ID(), err) - return - } - n.ipamReleaseVersion(4, ipam) - n.ipamReleaseVersion(6, ipam) -} - -func (n *network) ipamReleaseVersion(ipVer int, ipam ipamapi.Ipam) { - var infoList []*IpamInfo - - switch ipVer { - case 4: - infoList = n.ipamV4Info - case 6: - infoList = n.ipamV6Info - default: - log.Warnf("incorrect ip version passed to ipam release: %d", ipVer) - return - } - - if infoList == nil { - return - } - - log.Debugf("releasing IPv%d pools from network %s (%s)", ipVer, n.Name(), n.ID()) - - for _, d := range infoList { - if d.Gateway != nil { - if err := ipam.ReleaseAddress(d.PoolID, d.Gateway.IP); err != nil { - log.Warnf("Failed to release gateway ip address %s on delete of network %s (%s): %v", d.Gateway.IP, n.Name(), n.ID(), err) - } - } - if d.IPAMData.AuxAddresses != nil { - for k, nw := range d.IPAMData.AuxAddresses { - if d.Pool.Contains(nw.IP) { - if err := ipam.ReleaseAddress(d.PoolID, nw.IP); err != nil && err != ipamapi.ErrIPOutOfRange { - log.Warnf("Failed to release secondary ip address %s (%v) on delete of network %s (%s): %v", k, nw.IP, n.Name(), n.ID(), err) - } - } - } - } - if err := ipam.ReleasePool(d.PoolID); err != nil { - log.Warnf("Failed to release address pool %s on delete of network %s (%s): %v", d.PoolID, n.Name(), n.ID(), err) - } - } -} - -func (n *network) getIPInfo(ipVer int) []*IpamInfo { - var info []*IpamInfo - switch ipVer { - case 4: - info = n.ipamV4Info - case 6: - info = 
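// The allocate/release pairing implemented above, in isolation (address space
// and pool are illustrative; ipam is an ipamapi.Ipam driver as obtained via
// getIpamDriver; signatures follow the calls in this file):
poolID, pool, _, err := ipam.RequestPool("local", "172.28.0.0/16", "", nil, false)
if err == nil {
	gw, _, _ := ipam.RequestAddress(poolID, net.ParseIP("172.28.0.1"),
		map[string]string{ipamapi.RequestAddressType: netlabel.Gateway})
	_ = pool
	if gw != nil {
		_ = ipam.ReleaseAddress(poolID, gw.IP) // release addresses before the pool
	}
	_ = ipam.ReleasePool(poolID)
}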
n.ipamV6Info - default: - return nil - } - l := make([]*IpamInfo, 0, len(info)) - n.Lock() - for _, d := range info { - l = append(l, d) - } - n.Unlock() - return l -} - -func (n *network) getIPData(ipVer int) []driverapi.IPAMData { - var info []*IpamInfo - switch ipVer { - case 4: - info = n.ipamV4Info - case 6: - info = n.ipamV6Info - default: - return nil - } - l := make([]driverapi.IPAMData, 0, len(info)) - n.Lock() - for _, d := range info { - l = append(l, d.IPAMData) - } - n.Unlock() - return l -} - -func (n *network) deriveAddressSpace() (string, error) { - c := n.getController() - c.Lock() - ipd, ok := c.ipamDrivers[n.ipamType] - c.Unlock() - if !ok { - return "", types.NotFoundErrorf("could not find ipam driver %s to get default address space", n.ipamType) - } - if n.DataScope() == datastore.GlobalScope { - return ipd.defaultGlobalAddressSpace, nil - } - return ipd.defaultLocalAddressSpace, nil -} - -func (n *network) Info() NetworkInfo { - return n -} - -func (n *network) DriverOptions() map[string]string { - n.Lock() - defer n.Unlock() - if n.generic != nil { - if m, ok := n.generic[netlabel.GenericData]; ok { - return m.(map[string]string) - } - } - return map[string]string{} -} - -func (n *network) Scope() string { - return n.driverScope() -} - -func (n *network) IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) { - n.Lock() - defer n.Unlock() - - v4L := make([]*IpamConf, len(n.ipamV4Config)) - v6L := make([]*IpamConf, len(n.ipamV6Config)) - - for i, c := range n.ipamV4Config { - cc := &IpamConf{} - c.CopyTo(cc) - v4L[i] = cc - } - - for i, c := range n.ipamV6Config { - cc := &IpamConf{} - c.CopyTo(cc) - v6L[i] = cc - } - - return n.ipamType, n.ipamOptions, v4L, v6L -} - -func (n *network) IpamInfo() ([]*IpamInfo, []*IpamInfo) { - n.Lock() - defer n.Unlock() - - v4Info := make([]*IpamInfo, len(n.ipamV4Info)) - v6Info := make([]*IpamInfo, len(n.ipamV6Info)) - - for i, info := range n.ipamV4Info { - ic := &IpamInfo{} - info.CopyTo(ic) - v4Info[i] = ic - } - - for i, info := range n.ipamV6Info { - ic := &IpamInfo{} - info.CopyTo(ic) - v6Info[i] = ic - } - - return v4Info, v6Info -} - -func (n *network) Internal() bool { - n.Lock() - defer n.Unlock() - - return n.internal -} diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf_test.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf_test.go deleted file mode 100644 index 878d2484..00000000 --- a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package resolvconf - -import ( - "bytes" - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/pkg/ioutils" - _ "github.com/docker/libnetwork/testutils" -) - -func TestGet(t *testing.T) { - resolvConfUtils, err := Get() - if err != nil { - t.Fatal(err) - } - resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - t.Fatal(err) - } - if string(resolvConfUtils.Content) != string(resolvConfSystem) { - t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") - } - hashSystem, err := ioutils.HashData(bytes.NewReader(resolvConfSystem)) - if err != nil { - t.Fatal(err) - } - if resolvConfUtils.Hash != hashSystem { - t.Fatalf("/etc/resolv.conf and GetResolvConf have different hashes.") - } -} - -func TestGetNameservers(t *testing.T) { - for resolv, result := range map[string][]string{` -nameserver 1.2.3.4 -nameserver 40.3.200.10 -search example.com`: {"1.2.3.4", "40.3.200.10"}, - `search example.com`: {}, - `nameserver 1.2.3.4 -search example.com -nameserver 
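// Usage sketch for the parsers these fixtures exercise, reading the system
// file the way TestGet does above:
content, _ := ioutil.ReadFile("/etc/resolv.conf")
_ = GetNameservers(content)       // e.g. ["1.2.3.4", "4.30.20.100"]
_ = GetNameserversAsCIDR(content) // the same servers with /32 suffixes
_ = GetSearchDomains(content)     // e.g. ["example.com"]
_ = GetOptions(content)           // e.g. ["ndots:0"]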
4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, - ``: {}, - ` nameserver 1.2.3.4 `: {"1.2.3.4"}, - `search example.com -nameserver 1.2.3.4 -#nameserver 4.3.2.1`: {"1.2.3.4"}, - `search example.com -nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, - } { - test := GetNameservers([]byte(resolv)) - if !strSlicesEqual(test, result) { - t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) - } - } -} - -func TestGetNameserversAsCIDR(t *testing.T) { - for resolv, result := range map[string][]string{` -nameserver 1.2.3.4 -nameserver 40.3.200.10 -search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, - `search example.com`: {}, - `nameserver 1.2.3.4 -search example.com -nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, - ``: {}, - ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, - `search example.com -nameserver 1.2.3.4 -#nameserver 4.3.2.1`: {"1.2.3.4/32"}, - `search example.com -nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, - } { - test := GetNameserversAsCIDR([]byte(resolv)) - if !strSlicesEqual(test, result) { - t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) - } - } -} - -func TestGetSearchDomains(t *testing.T) { - for resolv, result := range map[string][]string{ - `search example.com`: {"example.com"}, - `search example.com # ignored`: {"example.com"}, - ` search example.com `: {"example.com"}, - ` search example.com # ignored`: {"example.com"}, - `search foo.example.com example.com`: {"foo.example.com", "example.com"}, - ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, - ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, - ``: {}, - `# ignored`: {}, - `nameserver 1.2.3.4 -search foo.example.com example.com`: {"foo.example.com", "example.com"}, - `nameserver 1.2.3.4 -search dup1.example.com dup2.example.com -search foo.example.com example.com`: {"foo.example.com", "example.com"}, - `nameserver 1.2.3.4 -search foo.example.com example.com -nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, - } { - test := GetSearchDomains([]byte(resolv)) - if !strSlicesEqual(test, result) { - t.Fatalf("Wrong search domain string {%s} should be %v. Input: %s", test, result, resolv) - } - } -} - -func TestGetOptions(t *testing.T) { - for resolv, result := range map[string][]string{ - `options opt1`: {"opt1"}, - `options opt1 # ignored`: {"opt1"}, - ` options opt1 `: {"opt1"}, - ` options opt1 # ignored`: {"opt1"}, - `options opt1 opt2 opt3`: {"opt1", "opt2", "opt3"}, - `options opt1 opt2 opt3 # ignored`: {"opt1", "opt2", "opt3"}, - ` options opt1 opt2 opt3 `: {"opt1", "opt2", "opt3"}, - ` options opt1 opt2 opt3 # ignored`: {"opt1", "opt2", "opt3"}, - ``: {}, - `# ignored`: {}, - `nameserver 1.2.3.4`: {}, - `nameserver 1.2.3.4 -options opt1 opt2 opt3`: {"opt1", "opt2", "opt3"}, - `nameserver 1.2.3.4 -options opt1 opt2 -options opt3 opt4`: {"opt3", "opt4"}, - } { - test := GetOptions([]byte(resolv)) - if !strSlicesEqual(test, result) { - t.Fatalf("Wrong options string {%s} should be %v. 
Input: %s", test, result, resolv) - } - } -} - -func strSlicesEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - - for i, v := range a { - if v != b[i] { - return false - } - } - - return true -} - -func TestBuild(t *testing.T) { - file, err := ioutil.TempFile("", "") - if err != nil { - t.Fatal(err) - } - defer os.Remove(file.Name()) - - _, err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"}, []string{"opt1"}) - if err != nil { - t.Fatal(err) - } - - content, err := ioutil.ReadFile(file.Name()) - if err != nil { - t.Fatal(err) - } - - if expected := "search search1\nnameserver ns1\nnameserver ns2\nnameserver ns3\noptions opt1\n"; !bytes.Contains(content, []byte(expected)) { - t.Fatalf("Expected to find '%s' got '%s'", expected, content) - } -} - -func TestBuildWithZeroLengthDomainSearch(t *testing.T) { - file, err := ioutil.TempFile("", "") - if err != nil { - t.Fatal(err) - } - defer os.Remove(file.Name()) - - _, err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."}, []string{"opt1"}) - if err != nil { - t.Fatal(err) - } - - content, err := ioutil.ReadFile(file.Name()) - if err != nil { - t.Fatal(err) - } - - if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\noptions opt1\n"; !bytes.Contains(content, []byte(expected)) { - t.Fatalf("Expected to find '%s' got '%s'", expected, content) - } - if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) { - t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content) - } -} - -func TestBuildWithNoOptions(t *testing.T) { - file, err := ioutil.TempFile("", "") - if err != nil { - t.Fatal(err) - } - defer os.Remove(file.Name()) - - _, err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"}, []string{}) - if err != nil { - t.Fatal(err) - } - - content, err := ioutil.ReadFile(file.Name()) - if err != nil { - t.Fatal(err) - } - - if expected := "search search1\nnameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) { - t.Fatalf("Expected to find '%s' got '%s'", expected, content) - } - if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) { - t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content) - } -} - -func TestFilterResolvDns(t *testing.T) { - ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n" - - if result, _ := FilterResolvDNS([]byte(ns0), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n" - if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n" - if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n" - if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - ns1 = "nameserver ::1\nnameserver 10.16.60.14\nnameserver 127.0.2.1\nnameserver 10.16.60.21\n" - if 
result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - ns1 = "nameserver 10.16.60.14\nnameserver ::1\nnameserver 10.16.60.21\nnameserver ::1" - if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - // with IPv6 disabled (false param), the IPv6 nameserver should be removed - ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1" - if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed Localhost+IPv6 off: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - // with IPv6 enabled, the IPv6 nameserver should be preserved - ns0 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\n" - ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1" - if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed Localhost+IPv6 on: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - // with IPv6 enabled, and no non-localhost servers, Google defaults (both IPv4+IPv6) should be added - ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\nnameserver 2001:4860:4860::8888\nnameserver 2001:4860:4860::8844" - ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1" - if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } - - // with IPv6 disabled, and no non-localhost servers, Google defaults (only IPv4) should be added - ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4" - ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1" - if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { - if ns0 != string(result.Content) { - t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result.Content)) - } - } -} diff --git a/vendor/github.com/docker/libnetwork/resolver.go b/vendor/github.com/docker/libnetwork/resolver.go deleted file mode 100644 index d395ab46..00000000 --- a/vendor/github.com/docker/libnetwork/resolver.go +++ /dev/null @@ -1,208 +0,0 @@ -package libnetwork - -import ( - "fmt" - "net" - "strings" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork/iptables" - "github.com/miekg/dns" -) - -// Resolver represents the embedded DNS server in Docker. It operates -// by listening on container's loopback interface for DNS queries. -type Resolver interface { - // Start starts the name server for the container - Start() error - // Stop stops the name server for the container. Stopped resolver - // can be reused after running the SetupFunc again. - Stop() - // SetupFunc() provides the setup function that should be run - // in the container's network namespace. - SetupFunc() func() - // NameServer() returns the IP of the DNS resolver for the - // containers. 
- NameServer() string - // To configure external name servers the resolver should use - SetExtServers([]string) - // ResolverOptions returns resolv.conf options that should be set - ResolverOptions() []string -} - -const ( - resolverIP = "127.0.0.11" - dnsPort = "53" - ptrIPv4domain = ".in-addr.arpa." - ptrIPv6domain = ".ip6.arpa." - respTTL = 1800 -) - -// resolver implements the Resolver interface -type resolver struct { - sb *sandbox - extDNS []string - server *dns.Server - conn *net.UDPConn - err error -} - -// NewResolver creates a new instance of the Resolver -func NewResolver(sb *sandbox) Resolver { - return &resolver{ - sb: sb, - err: fmt.Errorf("setup not done yet"), - } -} - -func (r *resolver) SetupFunc() func() { - return (func() { - var err error - - addr := &net.UDPAddr{ - IP: net.ParseIP(resolverIP), - } - - r.conn, err = net.ListenUDP("udp", addr) - if err != nil { - r.err = fmt.Errorf("error in opening name server socket %v", err) - return - } - laddr := r.conn.LocalAddr() - _, ipPort, _ := net.SplitHostPort(laddr.String()) - - rules := [][]string{ - {"-t", "nat", "-A", "OUTPUT", "-d", resolverIP, "-p", "udp", "--dport", dnsPort, "-j", "DNAT", "--to-destination", laddr.String()}, - {"-t", "nat", "-A", "POSTROUTING", "-s", resolverIP, "-p", "udp", "--sport", ipPort, "-j", "SNAT", "--to-source", ":" + dnsPort}, - } - - for _, rule := range rules { - r.err = iptables.RawCombinedOutput(rule...) - if r.err != nil { - return - } - } - r.err = nil - }) -} - -func (r *resolver) Start() error { - // make sure the resolver has been setup before starting - if r.err != nil { - return r.err - } - s := &dns.Server{Handler: r, PacketConn: r.conn} - r.server = s - go func() { - s.ActivateAndServe() - }() - return nil -} - -func (r *resolver) Stop() { - if r.server != nil { - r.server.Shutdown() - } - r.conn = nil - r.err = fmt.Errorf("setup not done yet") -} - -func (r *resolver) SetExtServers(dns []string) { - r.extDNS = dns -} - -func (r *resolver) NameServer() string { - return resolverIP -} - -func (r *resolver) ResolverOptions() []string { - return []string{"ndots:0"} -} - -func (r *resolver) handleIPv4Query(name string, query *dns.Msg) (*dns.Msg, error) { - addr := r.sb.ResolveName(name) - if addr == nil { - return nil, nil - } - - log.Debugf("Lookup for %s: IP %s", name, addr.String()) - - resp := new(dns.Msg) - resp.SetReply(query) - - rr := new(dns.A) - rr.Hdr = dns.RR_Header{Name: name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: respTTL} - rr.A = addr - resp.Answer = append(resp.Answer, rr) - return resp, nil -} - -func (r *resolver) handlePTRQuery(ptr string, query *dns.Msg) (*dns.Msg, error) { - parts := []string{} - - if strings.HasSuffix(ptr, ptrIPv4domain) { - parts = strings.Split(ptr, ptrIPv4domain) - } else if strings.HasSuffix(ptr, ptrIPv6domain) { - parts = strings.Split(ptr, ptrIPv6domain) - } else { - return nil, fmt.Errorf("invalid PTR query, %v", ptr) - } - - host := r.sb.ResolveIP(parts[0]) - if len(host) == 0 { - return nil, nil - } - - log.Debugf("Lookup for IP %s: name %s", parts[0], host) - fqdn := dns.Fqdn(host) - - resp := new(dns.Msg) - resp.SetReply(query) - - rr := new(dns.PTR) - rr.Hdr = dns.RR_Header{Name: ptr, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: respTTL} - rr.Ptr = fqdn - resp.Answer = append(resp.Answer, rr) - return resp, nil -} - -func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) { - var ( - resp *dns.Msg - err error - ) - - name := query.Question[0].Name - if query.Question[0].Qtype == dns.TypeA { - resp, err = 
r.handleIPv4Query(name, query) - } else if query.Question[0].Qtype == dns.TypePTR { - resp, err = r.handlePTRQuery(name, query) - } - - if err != nil { - log.Error(err) - return - } - - if resp == nil { - if len(r.extDNS) == 0 { - return - } - log.Debugf("Querying ext dns %s for %s[%d]", r.extDNS[0], name, query.Question[0].Qtype) - - c := &dns.Client{Net: "udp"} - addr := fmt.Sprintf("%s:%d", r.extDNS[0], 53) - - // TODO: iterate over avilable servers in case of error - resp, _, err = c.Exchange(query, addr) - if err != nil { - log.Errorf("external resolution failed, %s", err) - return - } - } - - err = w.WriteMsg(resp) - if err != nil { - log.Errorf("error writing resolver resp, %s", err) - } -} diff --git a/vendor/github.com/docker/libnetwork/sandbox.go b/vendor/github.com/docker/libnetwork/sandbox.go deleted file mode 100644 index 9dbb100e..00000000 --- a/vendor/github.com/docker/libnetwork/sandbox.go +++ /dev/null @@ -1,1197 +0,0 @@ -package libnetwork - -import ( - "container/heap" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "path/filepath" - "strings" - "sync" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork/etchosts" - "github.com/docker/libnetwork/osl" - "github.com/docker/libnetwork/resolvconf" - "github.com/docker/libnetwork/types" -) - -// Sandbox provides the control over the network container entity. It is a one to one mapping with the container. -type Sandbox interface { - // ID returns the ID of the sandbox - ID() string - // Key returns the sandbox's key - Key() string - // ContainerID returns the container id associated to this sandbox - ContainerID() string - // Labels returns the sandbox's labels - Labels() map[string]interface{} - // Statistics retrieves the interfaces' statistics for the sandbox - Statistics() (map[string]*types.InterfaceStatistics, error) - // Refresh leaves all the endpoints, resets and re-apply the options, - // re-joins all the endpoints without destroying the osl sandbox - Refresh(options ...SandboxOption) error - // SetKey updates the Sandbox Key - SetKey(key string) error - // Rename changes the name of all attached Endpoints - Rename(name string) error - // Delete destroys this container after detaching it from all connected endpoints. - Delete() error - // ResolveName searches for the service name in the networks to which the sandbox - // is connected to. - ResolveName(name string) net.IP - // ResolveIP returns the service name for the passed in IP. IP is in reverse dotted - // notation; the format used for DNS PTR records - ResolveIP(name string) string -} - -// SandboxOption is a option setter function type used to pass varios options to -// NewNetContainer method. The various setter functions of type SandboxOption are -// provided by libnetwork, they look like ContainerOptionXXXX(...) -type SandboxOption func(sb *sandbox) - -func (sb *sandbox) processOptions(options ...SandboxOption) { - for _, opt := range options { - if opt != nil { - opt(sb) - } - } -} - -type epHeap []*endpoint - -type sandbox struct { - id string - containerID string - config containerConfig - extDNS []string - osSbox osl.Sandbox - controller *controller - resolver Resolver - resolverOnce sync.Once - refCnt int - endpoints epHeap - epPriority map[string]int - joinLeaveDone chan struct{} - dbIndex uint64 - dbExists bool - isStub bool - inDelete bool - sync.Mutex -} - -// These are the container configs used to customize container /etc/hosts file. 
-type hostsPathConfig struct { - hostName string - domainName string - hostsPath string - originHostsPath string - extraHosts []extraHost - parentUpdates []parentUpdate -} - -type parentUpdate struct { - cid string - name string - ip string -} - -type extraHost struct { - name string - IP string -} - -// These are the container configs used to customize container /etc/resolv.conf file. -type resolvConfPathConfig struct { - resolvConfPath string - originResolvConfPath string - resolvConfHashFile string - dnsList []string - dnsSearchList []string - dnsOptionsList []string -} - -type containerConfig struct { - hostsPathConfig - resolvConfPathConfig - generic map[string]interface{} - useDefaultSandBox bool - useExternalKey bool - prio int // higher the value, more the priority -} - -func (sb *sandbox) ID() string { - return sb.id -} - -func (sb *sandbox) ContainerID() string { - return sb.containerID -} - -func (sb *sandbox) Key() string { - if sb.config.useDefaultSandBox { - return osl.GenerateKey("default") - } - return osl.GenerateKey(sb.id) -} - -func (sb *sandbox) Labels() map[string]interface{} { - return sb.config.generic -} - -func (sb *sandbox) Statistics() (map[string]*types.InterfaceStatistics, error) { - m := make(map[string]*types.InterfaceStatistics) - - if sb.osSbox == nil { - return m, nil - } - - var err error - for _, i := range sb.osSbox.Info().Interfaces() { - if m[i.DstName()], err = i.Statistics(); err != nil { - return m, err - } - } - - return m, nil -} - -func (sb *sandbox) Delete() error { - sb.Lock() - if sb.inDelete { - sb.Unlock() - return types.ForbiddenErrorf("another sandbox delete in progress") - } - // Set the inDelete flag. This will ensure that we don't - // update the store until we have completed all the endpoint - // leaves and deletes. And when endpoint leaves and deletes - // are completed then we can finally delete the sandbox object - // altogether from the data store. If the daemon exits - // ungracefully in the middle of a sandbox delete this way we - // will have all the references to the endpoints in the - // sandbox so that we can clean them up when we restart - sb.inDelete = true - sb.Unlock() - - c := sb.controller - - // Detach from all endpoints - retain := false - for _, ep := range sb.getConnectedEndpoints() { - // endpoint in the Gateway network will be cleaned up - // when when sandbox no longer needs external connectivity - if ep.endpointInGWNetwork() { - continue - } - - // Retain the sanbdox if we can't obtain the network from store. - if _, err := c.getNetworkFromStore(ep.getNetwork().ID()); err != nil { - retain = true - log.Warnf("Failed getting network for ep %s during sandbox %s delete: %v", ep.ID(), sb.ID(), err) - continue - } - - if err := ep.Leave(sb); err != nil { - log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) - } - - if err := ep.Delete(false); err != nil { - log.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err) - } - } - - if retain { - sb.Lock() - sb.inDelete = false - sb.Unlock() - return fmt.Errorf("could not cleanup all the endpoints in container %s / sandbox %s", sb.containerID, sb.id) - } - // Container is going away. Path cache in etchosts is most - // likely not required any more. Drop it. 
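
The Delete flow above serializes concurrent deletes by claiming an inDelete flag under the sandbox lock, then doing the slow endpoint teardown without holding the lock. A minimal sketch of that guard pattern, using a hypothetical simplified type rather than libnetwork's actual sandbox:

package sketch

import (
	"errors"
	"sync"
)

// resource is a hypothetical stand-in for the sandbox type above.
type resource struct {
	mu       sync.Mutex
	inDelete bool
}

// Delete claims the delete under the lock, then tears down unlocked,
// mirroring the inDelete guard in sandbox.Delete.
func (r *resource) Delete() error {
	r.mu.Lock()
	if r.inDelete {
		r.mu.Unlock()
		return errors.New("another delete in progress")
	}
	r.inDelete = true // claim the delete before releasing the lock
	r.mu.Unlock()

	// ...slow endpoint teardown runs here, lock not held...
	return nil
}
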
- etchosts.Drop(sb.config.hostsPath) - - if sb.resolver != nil { - sb.resolver.Stop() - } - - if sb.osSbox != nil && !sb.config.useDefaultSandBox { - sb.osSbox.Destroy() - } - - if err := sb.storeDelete(); err != nil { - log.Warnf("Failed to delete sandbox %s from store: %v", sb.ID(), err) - } - - c.Lock() - delete(c.sandboxes, sb.ID()) - c.Unlock() - - return nil -} - -func (sb *sandbox) Rename(name string) error { - var err error - - for _, ep := range sb.getConnectedEndpoints() { - if ep.endpointInGWNetwork() { - continue - } - - oldName := ep.Name() - lEp := ep - if err = ep.rename(name); err != nil { - break - } - - defer func() { - if err != nil { - lEp.rename(oldName) - } - }() - } - - return err -} - -func (sb *sandbox) Refresh(options ...SandboxOption) error { - // Store connected endpoints - epList := sb.getConnectedEndpoints() - - // Detach from all endpoints - for _, ep := range epList { - if err := ep.Leave(sb); err != nil { - log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) - } - } - - // Re-apply options - sb.config = containerConfig{} - sb.processOptions(options...) - - // Setup discovery files - if err := sb.setupResolutionFiles(); err != nil { - return err - } - - // Re -connect to all endpoints - for _, ep := range epList { - if err := ep.Join(sb); err != nil { - log.Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err) - } - } - - return nil -} - -func (sb *sandbox) MarshalJSON() ([]byte, error) { - sb.Lock() - defer sb.Unlock() - - // We are just interested in the container ID. This can be expanded to include all of containerInfo if there is a need - return json.Marshal(sb.id) -} - -func (sb *sandbox) UnmarshalJSON(b []byte) (err error) { - sb.Lock() - defer sb.Unlock() - - var id string - if err := json.Unmarshal(b, &id); err != nil { - return err - } - sb.id = id - return nil -} - -func (sb *sandbox) startResolver() { - sb.resolverOnce.Do(func() { - var err error - sb.resolver = NewResolver(sb) - defer func() { - if err != nil { - sb.resolver = nil - } - }() - - sb.rebuildDNS() - sb.resolver.SetExtServers(sb.extDNS) - - sb.osSbox.InvokeFunc(sb.resolver.SetupFunc()) - if err := sb.resolver.Start(); err != nil { - log.Errorf("Resolver Setup/Start failed for container %s, %q", sb.ContainerID(), err) - } - }) -} - -func (sb *sandbox) setupResolutionFiles() error { - if err := sb.buildHostsFile(); err != nil { - return err - } - - if err := sb.updateParentHosts(); err != nil { - return err - } - - if err := sb.setupDNS(); err != nil { - return err - } - - return nil -} - -func (sb *sandbox) getConnectedEndpoints() []*endpoint { - sb.Lock() - defer sb.Unlock() - - eps := make([]*endpoint, len(sb.endpoints)) - for i, ep := range sb.endpoints { - eps[i] = ep - } - - return eps -} - -func (sb *sandbox) getEndpoint(id string) *endpoint { - sb.Lock() - defer sb.Unlock() - - for _, ep := range sb.endpoints { - if ep.id == id { - return ep - } - } - - return nil -} - -func (sb *sandbox) updateGateway(ep *endpoint) error { - sb.Lock() - osSbox := sb.osSbox - sb.Unlock() - if osSbox == nil { - return nil - } - osSbox.UnsetGateway() - osSbox.UnsetGatewayIPv6() - - if ep == nil { - return nil - } - - ep.Lock() - joinInfo := ep.joinInfo - ep.Unlock() - - if err := osSbox.SetGateway(joinInfo.gw); err != nil { - return fmt.Errorf("failed to set gateway while updating gateway: %v", err) - } - - if err := osSbox.SetGatewayIPv6(joinInfo.gw6); err != nil { - return fmt.Errorf("failed to set IPv6 gateway while updating gateway: 
%v", err) - } - - return nil -} - -func (sb *sandbox) ResolveIP(ip string) string { - var svc string - log.Debugf("IP To resolve %v", ip) - - for _, ep := range sb.getConnectedEndpoints() { - n := ep.getNetwork() - - sr, ok := n.getController().svcDb[n.ID()] - if !ok { - continue - } - - nwName := n.Name() - n.Lock() - svc, ok = sr.ipMap[ip] - n.Unlock() - if ok { - return svc + "." + nwName - } - } - return svc -} - -func (sb *sandbox) ResolveName(name string) net.IP { - var ip net.IP - parts := strings.Split(name, ".") - log.Debugf("To resolve %v", parts) - - reqName := parts[0] - networkName := "" - if len(parts) > 1 { - networkName = parts[1] - } - epList := sb.getConnectedEndpoints() - // First check for local container alias - ip = sb.resolveName(reqName, networkName, epList, true) - if ip != nil { - return ip - } - - // Resolve the actual container name - return sb.resolveName(reqName, networkName, epList, false) -} - -func (sb *sandbox) resolveName(req string, networkName string, epList []*endpoint, alias bool) net.IP { - for _, ep := range epList { - name := req - n := ep.getNetwork() - - if networkName != "" && networkName != n.Name() { - continue - } - - if alias { - if ep.aliases == nil { - continue - } - - var ok bool - ep.Lock() - name, ok = ep.aliases[req] - ep.Unlock() - if !ok { - continue - } - } else { - // If it is a regular lookup and if the requested name is an alias - // dont perform a svc lookup for this endpoint. - ep.Lock() - if _, ok := ep.aliases[req]; ok { - ep.Unlock() - continue - } - ep.Unlock() - } - - sr, ok := n.getController().svcDb[n.ID()] - if !ok { - continue - } - - n.Lock() - ip, ok := sr.svcMap[name] - n.Unlock() - if ok { - return ip[0] - } - } - return nil -} - -func (sb *sandbox) SetKey(basePath string) error { - if basePath == "" { - return types.BadRequestErrorf("invalid sandbox key") - } - - sb.Lock() - oldosSbox := sb.osSbox - sb.Unlock() - - if oldosSbox != nil { - // If we already have an OS sandbox, release the network resources from that - // and destroy the OS snab. We are moving into a new home further down. Note that none - // of the network resources gets destroyed during the move. - sb.releaseOSSbox() - } - - osSbox, err := osl.GetSandboxForExternalKey(basePath, sb.Key()) - if err != nil { - return err - } - - sb.Lock() - sb.osSbox = osSbox - sb.Unlock() - defer func() { - if err != nil { - sb.Lock() - sb.osSbox = nil - sb.Unlock() - } - }() - - // If the resolver was setup before stop it and set it up in the - // new osl sandbox. - if oldosSbox != nil && sb.resolver != nil { - sb.resolver.Stop() - - sb.osSbox.InvokeFunc(sb.resolver.SetupFunc()) - if err := sb.resolver.Start(); err != nil { - log.Errorf("Resolver Setup/Start failed for container %s, %q", sb.ContainerID(), err) - } - } - - for _, ep := range sb.getConnectedEndpoints() { - if err = sb.populateNetworkResources(ep); err != nil { - return err - } - } - return nil -} - -func releaseOSSboxResources(osSbox osl.Sandbox, ep *endpoint) { - for _, i := range osSbox.Info().Interfaces() { - // Only remove the interfaces owned by this endpoint from the sandbox. - if ep.hasInterface(i.SrcName()) { - if err := i.Remove(); err != nil { - log.Debugf("Remove interface failed: %v", err) - } - } - } - - ep.Lock() - joinInfo := ep.joinInfo - ep.Unlock() - - if joinInfo == nil { - return - } - - // Remove non-interface routes. 
- for _, r := range joinInfo.StaticRoutes { - if err := osSbox.RemoveStaticRoute(r); err != nil { - log.Debugf("Remove route failed: %v", err) - } - } -} - -func (sb *sandbox) releaseOSSbox() { - sb.Lock() - osSbox := sb.osSbox - sb.osSbox = nil - sb.Unlock() - - if osSbox == nil { - return - } - - for _, ep := range sb.getConnectedEndpoints() { - releaseOSSboxResources(osSbox, ep) - } - - osSbox.Destroy() -} - -func (sb *sandbox) populateNetworkResources(ep *endpoint) error { - sb.Lock() - if sb.osSbox == nil { - sb.Unlock() - return nil - } - inDelete := sb.inDelete - sb.Unlock() - - ep.Lock() - joinInfo := ep.joinInfo - i := ep.iface - ep.Unlock() - - if ep.needResolver() { - sb.startResolver() - } - - if i != nil && i.srcName != "" { - var ifaceOptions []osl.IfaceOption - - ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().Address(i.addr), sb.osSbox.InterfaceOptions().Routes(i.routes)) - if i.addrv6 != nil && i.addrv6.IP.To16() != nil { - ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().AddressIPv6(i.addrv6)) - } - - if err := sb.osSbox.AddInterface(i.srcName, i.dstPrefix, ifaceOptions...); err != nil { - return fmt.Errorf("failed to add interface %s to sandbox: %v", i.srcName, err) - } - } - - if joinInfo != nil { - // Set up non-interface routes. - for _, r := range joinInfo.StaticRoutes { - if err := sb.osSbox.AddStaticRoute(r); err != nil { - return fmt.Errorf("failed to add static route %s: %v", r.Destination.String(), err) - } - } - } - - for _, gwep := range sb.getConnectedEndpoints() { - if len(gwep.Gateway()) > 0 { - if gwep != ep { - break - } - if err := sb.updateGateway(gwep); err != nil { - return err - } - } - } - - // Only update the store if we did not come here as part of - // sandbox delete. If we came here as part of delete then do - // not bother updating the store. The sandbox object will be - // deleted anyway - if !inDelete { - return sb.storeUpdate() - } - - return nil -} - -func (sb *sandbox) clearNetworkResources(origEp *endpoint) error { - ep := sb.getEndpoint(origEp.id) - if ep == nil { - return fmt.Errorf("could not find the sandbox endpoint data for endpoint %s", - ep.name) - } - - sb.Lock() - osSbox := sb.osSbox - inDelete := sb.inDelete - sb.Unlock() - if osSbox != nil { - releaseOSSboxResources(osSbox, ep) - } - - sb.Lock() - if len(sb.endpoints) == 0 { - // sb.endpoints should never be empty and this is unexpected error condition - // We log an error message to note this down for debugging purposes. - log.Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name()) - sb.Unlock() - return nil - } - - var ( - gwepBefore, gwepAfter *endpoint - index = -1 - ) - for i, e := range sb.endpoints { - if e == ep { - index = i - } - if len(e.Gateway()) > 0 && gwepBefore == nil { - gwepBefore = e - } - if index != -1 && gwepBefore != nil { - break - } - } - heap.Remove(&sb.endpoints, index) - for _, e := range sb.endpoints { - if len(e.Gateway()) > 0 { - gwepAfter = e - break - } - } - delete(sb.epPriority, ep.ID()) - sb.Unlock() - - if gwepAfter != nil && gwepBefore != gwepAfter { - sb.updateGateway(gwepAfter) - } - - // Only update the store if we did not come here as part of - // sandbox delete. If we came here as part of delete then do - // not bother updating the store. 
The sandbox object will be - // deleted anyway - if !inDelete { - return sb.storeUpdate() - } - - return nil -} - -const ( - defaultPrefix = "/var/lib/docker/network/files" - dirPerm = 0755 - filePerm = 0644 -) - -func (sb *sandbox) buildHostsFile() error { - if sb.config.hostsPath == "" { - sb.config.hostsPath = defaultPrefix + "/" + sb.id + "/hosts" - } - - dir, _ := filepath.Split(sb.config.hostsPath) - if err := createBasePath(dir); err != nil { - return err - } - - // This is for the host mode networking - if sb.config.originHostsPath != "" { - if err := copyFile(sb.config.originHostsPath, sb.config.hostsPath); err != nil && !os.IsNotExist(err) { - return types.InternalErrorf("could not copy source hosts file %s to %s: %v", sb.config.originHostsPath, sb.config.hostsPath, err) - } - return nil - } - - extraContent := make([]etchosts.Record, 0, len(sb.config.extraHosts)) - for _, extraHost := range sb.config.extraHosts { - extraContent = append(extraContent, etchosts.Record{Hosts: extraHost.name, IP: extraHost.IP}) - } - - return etchosts.Build(sb.config.hostsPath, "", sb.config.hostName, sb.config.domainName, extraContent) -} - -func (sb *sandbox) updateHostsFile(ifaceIP string) error { - var mhost string - - if sb.config.originHostsPath != "" { - return nil - } - - if sb.config.domainName != "" { - mhost = fmt.Sprintf("%s.%s %s", sb.config.hostName, sb.config.domainName, - sb.config.hostName) - } else { - mhost = sb.config.hostName - } - - extraContent := []etchosts.Record{{Hosts: mhost, IP: ifaceIP}} - - sb.addHostsEntries(extraContent) - return nil -} - -func (sb *sandbox) addHostsEntries(recs []etchosts.Record) { - if err := etchosts.Add(sb.config.hostsPath, recs); err != nil { - log.Warnf("Failed adding service host entries to the running container: %v", err) - } -} - -func (sb *sandbox) deleteHostsEntries(recs []etchosts.Record) { - if err := etchosts.Delete(sb.config.hostsPath, recs); err != nil { - log.Warnf("Failed deleting service host entries to the running container: %v", err) - } -} - -func (sb *sandbox) updateParentHosts() error { - var pSb Sandbox - - for _, update := range sb.config.parentUpdates { - sb.controller.WalkSandboxes(SandboxContainerWalker(&pSb, update.cid)) - if pSb == nil { - continue - } - if err := etchosts.Update(pSb.(*sandbox).config.hostsPath, update.ip, update.name); err != nil { - return err - } - } - - return nil -} - -func (sb *sandbox) setupDNS() error { - var newRC *resolvconf.File - - if sb.config.resolvConfPath == "" { - sb.config.resolvConfPath = defaultPrefix + "/" + sb.id + "/resolv.conf" - } - - sb.config.resolvConfHashFile = sb.config.resolvConfPath + ".hash" - - dir, _ := filepath.Split(sb.config.resolvConfPath) - if err := createBasePath(dir); err != nil { - return err - } - - // This is for the host mode networking - if sb.config.originResolvConfPath != "" { - if err := copyFile(sb.config.originResolvConfPath, sb.config.resolvConfPath); err != nil { - return fmt.Errorf("could not copy source resolv.conf file %s to %s: %v", sb.config.originResolvConfPath, sb.config.resolvConfPath, err) - } - return nil - } - - currRC, err := resolvconf.Get() - if err != nil { - return err - } - - if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 { - var ( - err error - dnsList = resolvconf.GetNameservers(currRC.Content) - dnsSearchList = resolvconf.GetSearchDomains(currRC.Content) - dnsOptionsList = resolvconf.GetOptions(currRC.Content) - ) - if len(sb.config.dnsList) > 0 { - dnsList = 
sb.config.dnsList - } - if len(sb.config.dnsSearchList) > 0 { - dnsSearchList = sb.config.dnsSearchList - } - if len(sb.config.dnsOptionsList) > 0 { - dnsOptionsList = sb.config.dnsOptionsList - } - newRC, err = resolvconf.Build(sb.config.resolvConfPath, dnsList, dnsSearchList, dnsOptionsList) - if err != nil { - return err - } - } else { - // Replace any localhost/127.* (at this point we have no info about ipv6, pass it as true) - if newRC, err = resolvconf.FilterResolvDNS(currRC.Content, true); err != nil { - return err - } - // No contention on container resolv.conf file at sandbox creation - if err := ioutil.WriteFile(sb.config.resolvConfPath, newRC.Content, filePerm); err != nil { - return types.InternalErrorf("failed to write unaltered resolv.conf file content when setting up dns for sandbox %s: %v", sb.ID(), err) - } - } - - // Write hash - if err := ioutil.WriteFile(sb.config.resolvConfHashFile, []byte(newRC.Hash), filePerm); err != nil { - return types.InternalErrorf("failed to write resolv.conf hash file when setting up dns for sandbox %s: %v", sb.ID(), err) - } - - return nil -} - -func (sb *sandbox) updateDNS(ipv6Enabled bool) error { - var ( - currHash string - hashFile = sb.config.resolvConfHashFile - ) - - if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 { - return nil - } - - currRC, err := resolvconf.GetSpecific(sb.config.resolvConfPath) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - h, err := ioutil.ReadFile(hashFile) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - currHash = string(h) - } - } - - if currHash != "" && currHash != currRC.Hash { - // Seems the user has changed the container resolv.conf since the last time - // we checked so return without doing anything. - log.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled) - return nil - } - - // replace any localhost/127.* and remove IPv6 nameservers if IPv6 disabled. - newRC, err := resolvconf.FilterResolvDNS(currRC.Content, ipv6Enabled) - if err != nil { - return err - } - - // for atomic updates to these files, use temporary files with os.Rename: - dir := path.Dir(sb.config.resolvConfPath) - tmpHashFile, err := ioutil.TempFile(dir, "hash") - if err != nil { - return err - } - tmpResolvFile, err := ioutil.TempFile(dir, "resolv") - if err != nil { - return err - } - - // Change the perms to filePerm (0644) since ioutil.TempFile creates it by default as 0600 - if err := os.Chmod(tmpResolvFile.Name(), filePerm); err != nil { - return err - } - - // write the updates to the temp files - if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newRC.Hash), filePerm); err != nil { - return err - } - if err = ioutil.WriteFile(tmpResolvFile.Name(), newRC.Content, filePerm); err != nil { - return err - } - - // rename the temp files for atomic replace - if err = os.Rename(tmpHashFile.Name(), hashFile); err != nil { - return err - } - return os.Rename(tmpResolvFile.Name(), sb.config.resolvConfPath) -} - -// Embedded DNS server has to be enabled for this sandbox.
Rebuild the container's -// resolv.conf by doing the following -// - Save the external name servers in resolv.conf in the sandbox -// - Add only the embedded server's IP to container's resolv.conf -// - If the embedded server needs any resolv.conf options add it to the current list -func (sb *sandbox) rebuildDNS() error { - currRC, err := resolvconf.GetSpecific(sb.config.resolvConfPath) - if err != nil { - return err - } - - // localhost entries have already been filtered out from the list - sb.extDNS = resolvconf.GetNameservers(currRC.Content) - - var ( - dnsList = []string{sb.resolver.NameServer()} - dnsOptionsList = resolvconf.GetOptions(currRC.Content) - dnsSearchList = resolvconf.GetSearchDomains(currRC.Content) - ) - - // Resolver returns the options in the format resolv.conf expects - dnsOptionsList = append(dnsOptionsList, sb.resolver.ResolverOptions()...) - - dir := path.Dir(sb.config.resolvConfPath) - tmpResolvFile, err := ioutil.TempFile(dir, "resolv") - if err != nil { - return err - } - - // Change the perms to filePerm (0644) since ioutil.TempFile creates it by default as 0600 - if err := os.Chmod(tmpResolvFile.Name(), filePerm); err != nil { - return err - } - - _, err = resolvconf.Build(tmpResolvFile.Name(), dnsList, dnsSearchList, dnsOptionsList) - if err != nil { - return err - } - - return os.Rename(tmpResolvFile.Name(), sb.config.resolvConfPath) -} - -// joinLeaveStart waits to ensure there are no joins or leaves in progress and -// marks this join/leave in progress without race -func (sb *sandbox) joinLeaveStart() { - sb.Lock() - defer sb.Unlock() - - for sb.joinLeaveDone != nil { - joinLeaveDone := sb.joinLeaveDone - sb.Unlock() - - select { - case <-joinLeaveDone: - } - - sb.Lock() - } - - sb.joinLeaveDone = make(chan struct{}) -} - -// joinLeaveEnd marks the end of this join/leave operation and -// signals the same without race to other join and leave waiters -func (sb *sandbox) joinLeaveEnd() { - sb.Lock() - defer sb.Unlock() - - if sb.joinLeaveDone != nil { - close(sb.joinLeaveDone) - sb.joinLeaveDone = nil - } -} - -// OptionHostname function returns an option setter for hostname option to -// be passed to NewSandbox method. -func OptionHostname(name string) SandboxOption { - return func(sb *sandbox) { - sb.config.hostName = name - } -} - -// OptionDomainname function returns an option setter for domainname option to -// be passed to NewSandbox method. -func OptionDomainname(name string) SandboxOption { - return func(sb *sandbox) { - sb.config.domainName = name - } -} - -// OptionHostsPath function returns an option setter for hostspath option to -// be passed to NewSandbox method. -func OptionHostsPath(path string) SandboxOption { - return func(sb *sandbox) { - sb.config.hostsPath = path - } -} - -// OptionOriginHostsPath function returns an option setter for origin hosts file path -// to be passed to NewSandbox method. -func OptionOriginHostsPath(path string) SandboxOption { - return func(sb *sandbox) { - sb.config.originHostsPath = path - } -} - -// OptionExtraHost function returns an option setter for extra /etc/hosts options -// which is a name and IP as strings. -func OptionExtraHost(name string, IP string) SandboxOption { - return func(sb *sandbox) { - sb.config.extraHosts = append(sb.config.extraHosts, extraHost{name: name, IP: IP}) - } -} - -// OptionParentUpdate function returns an option setter for parent container -// which needs to update the IP address for the linked container.
-func OptionParentUpdate(cid string, name, ip string) SandboxOption { - return func(sb *sandbox) { - sb.config.parentUpdates = append(sb.config.parentUpdates, parentUpdate{cid: cid, name: name, ip: ip}) - } -} - -// OptionResolvConfPath function returns an option setter for resolvconfpath option to -// be passed to net container methods. -func OptionResolvConfPath(path string) SandboxOption { - return func(sb *sandbox) { - sb.config.resolvConfPath = path - } -} - -// OptionOriginResolvConfPath function returns an option setter to set the path to the -// origin resolv.conf file to be passed to net container methods. -func OptionOriginResolvConfPath(path string) SandboxOption { - return func(sb *sandbox) { - sb.config.originResolvConfPath = path - } -} - -// OptionDNS function returns an option setter for dns entry option to -// be passed to container Create method. -func OptionDNS(dns string) SandboxOption { - return func(sb *sandbox) { - sb.config.dnsList = append(sb.config.dnsList, dns) - } -} - -// OptionDNSSearch function returns an option setter for dns search entry option to -// be passed to container Create method. -func OptionDNSSearch(search string) SandboxOption { - return func(sb *sandbox) { - sb.config.dnsSearchList = append(sb.config.dnsSearchList, search) - } -} - -// OptionDNSOptions function returns an option setter for dns options entry option to -// be passed to container Create method. -func OptionDNSOptions(options string) SandboxOption { - return func(sb *sandbox) { - sb.config.dnsOptionsList = append(sb.config.dnsOptionsList, options) - } -} - -// OptionUseDefaultSandbox function returns an option setter for using default sandbox to -// be passed to container Create method. -func OptionUseDefaultSandbox() SandboxOption { - return func(sb *sandbox) { - sb.config.useDefaultSandBox = true - } -} - -// OptionUseExternalKey function returns an option setter for using provided namespace -// instead of creating one. -func OptionUseExternalKey() SandboxOption { - return func(sb *sandbox) { - sb.config.useExternalKey = true - } -} - -// OptionGeneric function returns an option setter for Generic configuration -// that is not managed by libNetwork but can be used by the Drivers during the call to -// net container creation method. Container Labels are a good example. 
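
These Option* constructors are Go's functional-options pattern: each returns a closure that mutates the config, and processOptions applies them in order. A minimal self-contained sketch with illustrative names (not libnetwork's actual fields):

package sketch

// config and the With* helpers are illustrative stand-ins.
type config struct {
	hostName string
	dnsList  []string
}

type Option func(*config)

func WithHostname(name string) Option {
	return func(c *config) { c.hostName = name }
}

func WithDNS(server string) Option {
	return func(c *config) { c.dnsList = append(c.dnsList, server) }
}

// newConfig mirrors processOptions: apply each non-nil option in order.
func newConfig(opts ...Option) *config {
	c := &config{}
	for _, opt := range opts {
		if opt != nil {
			opt(c)
		}
	}
	return c
}

A caller would then write something like newConfig(WithHostname("web"), WithDNS("8.8.8.8")), which is the same shape as NewSandbox taking SandboxOption values.
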
-func OptionGeneric(generic map[string]interface{}) SandboxOption { - return func(sb *sandbox) { - sb.config.generic = generic - } -} - -func (eh epHeap) Len() int { return len(eh) } - -func (eh epHeap) Less(i, j int) bool { - var ( - cip, cjp int - ok bool - ) - - ci, _ := eh[i].getSandbox() - cj, _ := eh[j].getSandbox() - - epi := eh[i] - epj := eh[j] - - if epi.endpointInGWNetwork() { - return false - } - - if epj.endpointInGWNetwork() { - return true - } - - if ci != nil { - cip, ok = ci.epPriority[eh[i].ID()] - if !ok { - cip = 0 - } - } - - if cj != nil { - cjp, ok = cj.epPriority[eh[j].ID()] - if !ok { - cjp = 0 - } - } - - if cip == cjp { - return eh[i].network.Name() < eh[j].network.Name() - } - - return cip > cjp -} - -func (eh epHeap) Swap(i, j int) { eh[i], eh[j] = eh[j], eh[i] } - -func (eh *epHeap) Push(x interface{}) { - *eh = append(*eh, x.(*endpoint)) -} - -func (eh *epHeap) Pop() interface{} { - old := *eh - n := len(old) - x := old[n-1] - *eh = old[0 : n-1] - return x -} - -func createBasePath(dir string) error { - return os.MkdirAll(dir, dirPerm) -} - -func createFile(path string) error { - var f *os.File - - dir, _ := filepath.Split(path) - err := createBasePath(dir) - if err != nil { - return err - } - - f, err = os.Create(path) - if err == nil { - f.Close() - } - - return err -} - -func copyFile(src, dst string) error { - sBytes, err := ioutil.ReadFile(src) - if err != nil { - return err - } - return ioutil.WriteFile(dst, sBytes, filePerm) -} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey.go deleted file mode 100644 index 3c362f30..00000000 --- a/vendor/github.com/docker/libnetwork/sandbox_externalkey.go +++ /dev/null @@ -1,12 +0,0 @@ -package libnetwork - -import "github.com/docker/docker/pkg/reexec" - -type setKeyData struct { - ContainerID string - Key string -} - -func init() { - reexec.Register("libnetwork-setkey", processSetKeyReexec) -} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go deleted file mode 100644 index 74ae2af7..00000000 --- a/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go +++ /dev/null @@ -1,177 +0,0 @@ -// +build !windows - -package libnetwork - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/configs" -) - -const udsBase = "/var/lib/docker/network/files/" -const success = "success" - -// processSetKeyReexec is a private function that must be called only on an reexec path -// It expects 3 args { [0] = "libnetwork-setkey", [1] = , [2] = } -// It also expects libcontainer.State as a json string in -// Refer to https://github.com/opencontainers/runc/pull/160/ for more information -func processSetKeyReexec() { - var err error - - // Return a failure to the calling process via ExitCode - defer func() { - if err != nil { - logrus.Fatalf("%v", err) - } - }() - - // expecting 3 args {[0]="libnetwork-setkey", [1]=, [2]= } - if len(os.Args) < 3 { - err = fmt.Errorf("Re-exec expects 3 args, received : %d", len(os.Args)) - return - } - containerID := os.Args[1] - - // We expect libcontainer.State as a json string in - stateBuf, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return - } - var state libcontainer.State - if err = json.Unmarshal(stateBuf, &state); err != nil { - 
return - } - - controllerID := os.Args[2] - key := state.NamespacePaths[configs.NamespaceType("NEWNET")] - - err = SetExternalKey(controllerID, containerID, key) - return -} - -// SetExternalKey provides a convenient way to set an External key to a sandbox -func SetExternalKey(controllerID string, containerID string, key string) error { - keyData := setKeyData{ - ContainerID: containerID, - Key: key} - - c, err := net.Dial("unix", udsBase+controllerID+".sock") - if err != nil { - return err - } - defer c.Close() - - if err = sendKey(c, keyData); err != nil { - return fmt.Errorf("sendKey failed with : %v", err) - } - return processReturn(c) -} - -func sendKey(c net.Conn, data setKeyData) error { - var err error - defer func() { - if err != nil { - c.Close() - } - }() - - var b []byte - if b, err = json.Marshal(data); err != nil { - return err - } - - _, err = c.Write(b) - return err -} - -func processReturn(r io.Reader) error { - buf := make([]byte, 1024) - n, err := r.Read(buf[:]) - if err != nil { - return fmt.Errorf("failed to read buf in processReturn : %v", err) - } - if string(buf[0:n]) != success { - return fmt.Errorf(string(buf[0:n])) - } - return nil -} - -func (c *controller) startExternalKeyListener() error { - if err := os.MkdirAll(udsBase, 0600); err != nil { - return err - } - uds := udsBase + c.id + ".sock" - l, err := net.Listen("unix", uds) - if err != nil { - return err - } - if err := os.Chmod(uds, 0600); err != nil { - l.Close() - return err - } - c.Lock() - c.extKeyListener = l - c.Unlock() - - go c.acceptClientConnections(uds, l) - return nil -} - -func (c *controller) acceptClientConnections(sock string, l net.Listener) { - for { - conn, err := l.Accept() - if err != nil { - if _, err1 := os.Stat(sock); os.IsNotExist(err1) { - logrus.Debugf("Unix socket %s doesnt exist. 
cannot accept client connections", sock) - return - } - logrus.Errorf("Error accepting connection %v", err) - continue - } - go func() { - err := c.processExternalKey(conn) - ret := success - if err != nil { - ret = err.Error() - } - - _, err = conn.Write([]byte(ret)) - if err != nil { - logrus.Errorf("Error returning to the client %v", err) - } - }() - } -} - -func (c *controller) processExternalKey(conn net.Conn) error { - buf := make([]byte, 1280) - nr, err := conn.Read(buf) - if err != nil { - return err - } - var s setKeyData - if err = json.Unmarshal(buf[0:nr], &s); err != nil { - return err - } - - var sandbox Sandbox - search := SandboxContainerWalker(&sandbox, s.ContainerID) - c.WalkSandboxes(search) - if sandbox == nil { - return types.BadRequestErrorf("no sandbox present for %s", s.ContainerID) - } - - return sandbox.SetKey(s.Key) -} - -func (c *controller) stopExternalKeyListener() { - c.extKeyListener.Close() -} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go deleted file mode 100644 index 3c4113e2..00000000 --- a/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build windows - -package libnetwork - -import ( - "io" - "net" - - "github.com/docker/libnetwork/types" -) - -// processSetKeyReexec is a private function that must be called only on an reexec path -// It expects 3 args { [0] = "libnetwork-setkey", [1] = , [2] = } -// It also expects libcontainer.State as a json string in -// Refer to https://github.com/opencontainers/runc/pull/160/ for more information -func processSetKeyReexec() { -} - -// SetExternalKey provides a convenient way to set an External key to a sandbox -func SetExternalKey(controllerID string, containerID string, key string) error { - return types.NotImplementedErrorf("SetExternalKey isn't supported on non linux systems") -} - -func sendKey(c net.Conn, data setKeyData) error { - return types.NotImplementedErrorf("sendKey isn't supported on non linux systems") -} - -func processReturn(r io.Reader) error { - return types.NotImplementedErrorf("processReturn isn't supported on non linux systems") -} - -// no-op on non linux systems -func (c *controller) startExternalKeyListener() error { - return nil -} - -func (c *controller) acceptClientConnections(sock string, l net.Listener) { -} - -func (c *controller) processExternalKey(conn net.Conn) error { - return types.NotImplementedErrorf("processExternalKey isn't supported on non linux systems") -} - -func (c *controller) stopExternalKeyListener() { -} diff --git a/vendor/github.com/docker/libnetwork/sandbox_store.go b/vendor/github.com/docker/libnetwork/sandbox_store.go deleted file mode 100644 index 61eda408..00000000 --- a/vendor/github.com/docker/libnetwork/sandbox_store.go +++ /dev/null @@ -1,233 +0,0 @@ -package libnetwork - -import ( - "container/heap" - "encoding/json" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/osl" -) - -const ( - sandboxPrefix = "sandbox" -) - -type epState struct { - Eid string - Nid string -} - -type sbState struct { - ID string - Cid string - c *controller - dbIndex uint64 - dbExists bool - Eps []epState -} - -func (sbs *sbState) Key() []string { - return []string{sandboxPrefix, sbs.ID} -} - -func (sbs *sbState) KeyPrefix() []string { - return []string{sandboxPrefix} -} - -func (sbs *sbState) Value() []byte { - b, err := json.Marshal(sbs) - if err != nil { - 
return nil - } - return b -} - -func (sbs *sbState) SetValue(value []byte) error { - return json.Unmarshal(value, sbs) -} - -func (sbs *sbState) Index() uint64 { - sbi, err := sbs.c.SandboxByID(sbs.ID) - if err != nil { - return sbs.dbIndex - } - - sb := sbi.(*sandbox) - maxIndex := sb.dbIndex - if sbs.dbIndex > maxIndex { - maxIndex = sbs.dbIndex - } - - return maxIndex -} - -func (sbs *sbState) SetIndex(index uint64) { - sbs.dbIndex = index - sbs.dbExists = true - - sbi, err := sbs.c.SandboxByID(sbs.ID) - if err != nil { - return - } - - sb := sbi.(*sandbox) - sb.dbIndex = index - sb.dbExists = true -} - -func (sbs *sbState) Exists() bool { - if sbs.dbExists { - return sbs.dbExists - } - - sbi, err := sbs.c.SandboxByID(sbs.ID) - if err != nil { - return false - } - - sb := sbi.(*sandbox) - return sb.dbExists -} - -func (sbs *sbState) Skip() bool { - return false -} - -func (sbs *sbState) New() datastore.KVObject { - return &sbState{c: sbs.c} -} - -func (sbs *sbState) CopyTo(o datastore.KVObject) error { - dstSbs := o.(*sbState) - dstSbs.c = sbs.c - dstSbs.ID = sbs.ID - dstSbs.Cid = sbs.Cid - dstSbs.dbIndex = sbs.dbIndex - dstSbs.dbExists = sbs.dbExists - - for _, eps := range sbs.Eps { - dstSbs.Eps = append(dstSbs.Eps, eps) - } - - return nil -} - -func (sbs *sbState) DataScope() string { - return datastore.LocalScope -} - -func (sb *sandbox) storeUpdate() error { - sbs := &sbState{ - c: sb.controller, - ID: sb.id, - Cid: sb.containerID, - } - -retry: - sbs.Eps = nil - for _, ep := range sb.getConnectedEndpoints() { - // If the endpoint is not persisted then do not add it to - // the sandbox checkpoint - if ep.Skip() { - continue - } - - eps := epState{ - Nid: ep.getNetwork().ID(), - Eid: ep.ID(), - } - - sbs.Eps = append(sbs.Eps, eps) - } - - err := sb.controller.updateToStore(sbs) - if err == datastore.ErrKeyModified { - // When we get ErrKeyModified it is sufficient to just - // go back and retry. No need to get the object from - // the store because we always regenerate the store - // state from in memory sandbox state - goto retry - } - - return err -} - -func (sb *sandbox) storeDelete() error { - sbs := &sbState{ - c: sb.controller, - ID: sb.id, - Cid: sb.containerID, - dbIndex: sb.dbIndex, - dbExists: sb.dbExists, - } - - return sb.controller.deleteFromStore(sbs) -} - -func (c *controller) sandboxCleanup() { - store := c.getStore(datastore.LocalScope) - if store == nil { - logrus.Errorf("Could not find local scope store while trying to cleanup sandboxes") - return - } - - kvol, err := store.List(datastore.Key(sandboxPrefix), &sbState{c: c}) - if err != nil && err != datastore.ErrKeyNotFound { - logrus.Errorf("failed to get sandboxes for scope %s: %v", store.Scope(), err) - return - } - - // It's normal for no sandboxes to be found. Just bail out. 
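
storeUpdate above regenerates its checkpoint from in-memory state and retries whenever the atomic put fails with datastore.ErrKeyModified. The shape of that optimistic-concurrency loop, sketched with hypothetical helpers:

package sketch

import "errors"

// errKeyModified is a stand-in for datastore.ErrKeyModified.
var errKeyModified = errors.New("key modified")

// updateWithRetry rebuilds the checkpoint from in-memory state, attempts an
// atomic put, and retries only when another writer modified the key first.
func updateWithRetry(rebuild func(), put func() error) error {
	for {
		rebuild() // regenerate the record from in-memory state
		if err := put(); !errors.Is(err, errKeyModified) {
			return err
		}
		// lost the race; rebuild from memory and try again
	}
}
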
- if err == datastore.ErrKeyNotFound { - return - } - - for _, kvo := range kvol { - sbs := kvo.(*sbState) - - sb := &sandbox{ - id: sbs.ID, - controller: sbs.c, - containerID: sbs.Cid, - endpoints: epHeap{}, - epPriority: map[string]int{}, - dbIndex: sbs.dbIndex, - isStub: true, - dbExists: true, - } - - sb.osSbox, err = osl.NewSandbox(sb.Key(), true) - if err != nil { - logrus.Errorf("failed to create new osl sandbox while trying to build sandbox for cleanup: %v", err) - continue - } - - c.Lock() - c.sandboxes[sb.id] = sb - c.Unlock() - - for _, eps := range sbs.Eps { - n, err := c.getNetworkFromStore(eps.Nid) - var ep *endpoint - if err != nil { - logrus.Errorf("getNetworkFromStore for nid %s failed while trying to build sandbox for cleanup: %v", eps.Nid, err) - n = &network{id: eps.Nid, ctrlr: c, drvOnce: &sync.Once{}} - ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID} - } else { - ep, err = n.getEndpointFromStore(eps.Eid) - if err != nil { - logrus.Errorf("getEndpointFromStore for eid %s failed while trying to build sandbox for cleanup: %v", eps.Eid, err) - ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID} - } - } - - heap.Push(&sb.endpoints, ep) - } - - if err := sb.Delete(); err != nil { - logrus.Errorf("failed to delete sandbox %s while trying to cleanup: %v", sb.id, err) - } - } -} diff --git a/vendor/github.com/docker/libnetwork/sandbox_test.go b/vendor/github.com/docker/libnetwork/sandbox_test.go deleted file mode 100644 index b17275cc..00000000 --- a/vendor/github.com/docker/libnetwork/sandbox_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package libnetwork - -import ( - "testing" - - "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/osl" - "github.com/docker/libnetwork/testutils" -) - -func createEmptyCtrlr() *controller { - return &controller{sandboxes: sandboxTable{}} -} - -func getTestEnv(t *testing.T) (NetworkController, Network, Network) { - netType := "bridge" - - option := options.Generic{ - "EnableIPForwarding": true, - } - genericOption := make(map[string]interface{}) - genericOption[netlabel.GenericData] = option - - cfgOptions, err := OptionBoltdbWithRandomDBFile() - if err != nil { - t.Fatal(err) - } - c, err := New(append(cfgOptions, config.OptionDriverConfig(netType, genericOption))...) - if err != nil { - t.Fatal(err) - } - - name1 := "test_nw_1" - netOption1 := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": name1, - }, - } - n1, err := c.NewNetwork(netType, name1, NetworkOptionGeneric(netOption1)) - if err != nil { - t.Fatal(err) - } - - name2 := "test_nw_2" - netOption2 := options.Generic{ - netlabel.GenericData: options.Generic{ - "BridgeName": name2, - }, - } - n2, err := c.NewNetwork(netType, name2, NetworkOptionGeneric(netOption2)) - if err != nil { - t.Fatal(err) - } - - return c, n1, n2 -} - -func TestSandboxAddEmpty(t *testing.T) { - ctrlr := createEmptyCtrlr() - - sbx, err := ctrlr.NewSandbox("sandbox0") - if err != nil { - t.Fatal(err) - } - - if err := sbx.Delete(); err != nil { - t.Fatal(err) - } - - if len(ctrlr.sandboxes) != 0 { - t.Fatalf("controller sandboxes is not empty. 
len = %d", len(ctrlr.sandboxes)) - } - - osl.GC() -} - -func TestSandboxAddMultiPrio(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - c, nw, _ := getTestEnv(t) - ctrlr := c.(*controller) - - sbx, err := ctrlr.NewSandbox("sandbox1") - if err != nil { - t.Fatal(err) - } - sid := sbx.ID() - - ep1, err := nw.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - ep2, err := nw.CreateEndpoint("ep2") - if err != nil { - t.Fatal(err) - } - ep3, err := nw.CreateEndpoint("ep3") - if err != nil { - t.Fatal(err) - } - - if err := ep1.Join(sbx, JoinOptionPriority(ep1, 1)); err != nil { - t.Fatal(err) - } - - if err := ep2.Join(sbx, JoinOptionPriority(ep2, 2)); err != nil { - t.Fatal(err) - } - - if err := ep3.Join(sbx, JoinOptionPriority(ep3, 3)); err != nil { - t.Fatal(err) - } - - if ctrlr.sandboxes[sid].endpoints[0].ID() != ep3.ID() { - t.Fatal("Expected ep3 to be at the top of the heap. But did not find ep3 at the top of the heap") - } - - if err := ep3.Leave(sbx); err != nil { - t.Fatal(err) - } - if ctrlr.sandboxes[sid].endpoints[0].ID() != ep2.ID() { - t.Fatal("Expected ep2 to be at the top of the heap after removing ep3. But did not find ep2 at the top of the heap") - } - - if err := ep2.Leave(sbx); err != nil { - t.Fatal(err) - } - if ctrlr.sandboxes[sid].endpoints[0].ID() != ep1.ID() { - t.Fatal("Expected ep1 to be at the top of the heap after removing ep2. But did not find ep1 at the top of the heap") - } - - // Re-add ep3 back - if err := ep3.Join(sbx, JoinOptionPriority(ep3, 3)); err != nil { - t.Fatal(err) - } - - if ctrlr.sandboxes[sid].endpoints[0].ID() != ep3.ID() { - t.Fatal("Expected ep3 to be at the top of the heap after adding ep3 back. But did not find ep3 at the top of the heap") - } - - if err := sbx.Delete(); err != nil { - t.Fatal(err) - } - - if len(ctrlr.sandboxes) != 0 { - t.Fatalf("controller sandboxes is not empty. len = %d", len(ctrlr.sandboxes)) - } - - osl.GC() -} - -func TestSandboxAddSamePrio(t *testing.T) { - if !testutils.IsRunningInContainer() { - defer testutils.SetupTestOSContext(t)() - } - - c, nw1, nw2 := getTestEnv(t) - - ctrlr := c.(*controller) - - sbx, err := ctrlr.NewSandbox("sandbox1") - if err != nil { - t.Fatal(err) - } - sid := sbx.ID() - - ep1, err := nw1.CreateEndpoint("ep1") - if err != nil { - t.Fatal(err) - } - ep2, err := nw2.CreateEndpoint("ep2") - if err != nil { - t.Fatal(err) - } - - if err := ep1.Join(sbx); err != nil { - t.Fatal(err) - } - - if err := ep2.Join(sbx); err != nil { - t.Fatal(err) - } - - if ctrlr.sandboxes[sid].endpoints[0].ID() != ep1.ID() { - t.Fatal("Expected ep1 to be at the top of the heap. But did not find ep1 at the top of the heap") - } - - if err := ep1.Leave(sbx); err != nil { - t.Fatal(err) - } - - if ctrlr.sandboxes[sid].endpoints[0].ID() != ep2.ID() { - t.Fatal("Expected ep2 to be at the top of the heap after removing ep3. But did not find ep2 at the top of the heap") - } - - if err := ep2.Leave(sbx); err != nil { - t.Fatal(err) - } - - if err := sbx.Delete(); err != nil { - t.Fatal(err) - } - - if len(ctrlr.sandboxes) != 0 { - t.Fatalf("controller containers is not empty. 
len = %d", len(ctrlr.sandboxes)) - } - - osl.GC() -} diff --git a/vendor/github.com/docker/libnetwork/store.go b/vendor/github.com/docker/libnetwork/store.go deleted file mode 100644 index be3e8ae6..00000000 --- a/vendor/github.com/docker/libnetwork/store.go +++ /dev/null @@ -1,415 +0,0 @@ -package libnetwork - -import ( - "fmt" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork/datastore" -) - -func (c *controller) initStores() error { - c.Lock() - if c.cfg == nil { - c.Unlock() - return nil - } - scopeConfigs := c.cfg.Scopes - c.Unlock() - - for scope, scfg := range scopeConfigs { - store, err := datastore.NewDataStore(scope, scfg) - if err != nil { - return err - } - c.Lock() - c.stores = append(c.stores, store) - c.Unlock() - } - - c.startWatch() - return nil -} - -func (c *controller) closeStores() { - for _, store := range c.getStores() { - store.Close() - } -} - -func (c *controller) getStore(scope string) datastore.DataStore { - c.Lock() - defer c.Unlock() - - for _, store := range c.stores { - if store.Scope() == scope { - return store - } - } - - return nil -} - -func (c *controller) getStores() []datastore.DataStore { - c.Lock() - defer c.Unlock() - - return c.stores -} - -func (c *controller) getNetworkFromStore(nid string) (*network, error) { - for _, store := range c.getStores() { - n := &network{id: nid, ctrlr: c} - err := store.GetObject(datastore.Key(n.Key()...), n) - // Continue searching in the next store if the key is not found in this store - if err != nil { - if err != datastore.ErrKeyNotFound { - log.Debugf("could not find network %s: %v", nid, err) - } - continue - } - - ec := &endpointCnt{n: n} - err = store.GetObject(datastore.Key(ec.Key()...), ec) - if err != nil { - return nil, fmt.Errorf("could not find endpoint count for network %s: %v", n.Name(), err) - } - - n.epCnt = ec - return n, nil - } - - return nil, fmt.Errorf("network %s not found", nid) -} - -func (c *controller) getNetworksForScope(scope string) ([]*network, error) { - var nl []*network - - store := c.getStore(scope) - if store == nil { - return nil, nil - } - - kvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix), - &network{ctrlr: c}) - if err != nil && err != datastore.ErrKeyNotFound { - return nil, fmt.Errorf("failed to get networks for scope %s: %v", - scope, err) - } - - for _, kvo := range kvol { - n := kvo.(*network) - n.ctrlr = c - - ec := &endpointCnt{n: n} - err = store.GetObject(datastore.Key(ec.Key()...), ec) - if err != nil { - return nil, fmt.Errorf("could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err) - } - - n.epCnt = ec - nl = append(nl, n) - } - - return nl, nil -} - -func (c *controller) getNetworksFromStore() ([]*network, error) { - var nl []*network - - for _, store := range c.getStores() { - kvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix), - &network{ctrlr: c}) - // Continue searching in the next store if no keys found in this store - if err != nil { - if err != datastore.ErrKeyNotFound { - log.Debugf("failed to get networks for scope %s: %v", store.Scope(), err) - } - continue - } - - for _, kvo := range kvol { - n := kvo.(*network) - n.Lock() - n.ctrlr = c - n.Unlock() - - ec := &endpointCnt{n: n} - err = store.GetObject(datastore.Key(ec.Key()...), ec) - if err != nil { - return nil, fmt.Errorf("could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err) - } - - n.epCnt = ec - nl = append(nl, n) - } - } - - 
return nl, nil -} - -func (n *network) getEndpointFromStore(eid string) (*endpoint, error) { - store := n.ctrlr.getStore(n.Scope()) - if store == nil { - return nil, fmt.Errorf("could not find endpoint %s: datastore not found for scope %s", eid, n.Scope()) - } - - ep := &endpoint{id: eid, network: n} - err := store.GetObject(datastore.Key(ep.Key()...), ep) - if err != nil { - return nil, fmt.Errorf("could not find endpoint %s: %v", eid, err) - } - return ep, nil -} - -func (n *network) getEndpointsFromStore() ([]*endpoint, error) { - var epl []*endpoint - - tmp := endpoint{network: n} - for _, store := range n.getController().getStores() { - kvol, err := store.List(datastore.Key(tmp.KeyPrefix()...), &endpoint{network: n}) - // Continue searching in the next store if no keys found in this store - if err != nil { - if err != datastore.ErrKeyNotFound { - log.Debugf("failed to get endpoints for network %s scope %s: %v", - n.Name(), store.Scope(), err) - } - continue - } - - for _, kvo := range kvol { - ep := kvo.(*endpoint) - epl = append(epl, ep) - } - } - - return epl, nil -} - -func (c *controller) updateToStore(kvObject datastore.KVObject) error { - cs := c.getStore(kvObject.DataScope()) - if cs == nil { - log.Warnf("datastore for scope %s not initialized. kv object %s is not added to the store", kvObject.DataScope(), datastore.Key(kvObject.Key()...)) - return nil - } - - if err := cs.PutObjectAtomic(kvObject); err != nil { - if err == datastore.ErrKeyModified { - return err - } - return fmt.Errorf("failed to update store for object type %T: %v", kvObject, err) - } - - return nil -} - -func (c *controller) deleteFromStore(kvObject datastore.KVObject) error { - cs := c.getStore(kvObject.DataScope()) - if cs == nil { - log.Debugf("datastore for scope %s not initialized. kv object %s is not deleted from datastore", kvObject.DataScope(), datastore.Key(kvObject.Key()...)) - return nil - } - -retry: - if err := cs.DeleteObjectAtomic(kvObject); err != nil { - if err == datastore.ErrKeyModified { - if err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil { - return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err) - } - goto retry - } - return err - } - - return nil -} - -type netWatch struct { - localEps map[string]*endpoint - remoteEps map[string]*endpoint - stopCh chan struct{} -} - -func (c *controller) getLocalEps(nw *netWatch) []*endpoint { - c.Lock() - defer c.Unlock() - - var epl []*endpoint - for _, ep := range nw.localEps { - epl = append(epl, ep) - } - - return epl -} - -func (c *controller) watchSvcRecord(ep *endpoint) { - c.watchCh <- ep -} - -func (c *controller) unWatchSvcRecord(ep *endpoint) { - c.unWatchCh <- ep -} - -func (c *controller) networkWatchLoop(nw *netWatch, ep *endpoint, ecCh <-chan datastore.KVObject) { - for { - select { - case <-nw.stopCh: - return - case o := <-ecCh: - ec := o.(*endpointCnt) - - epl, err := ec.n.getEndpointsFromStore() - if err != nil { - break - } - - c.Lock() - var addEp []*endpoint - - delEpMap := make(map[string]*endpoint) - renameEpMap := make(map[string]bool) - for k, v := range nw.remoteEps { - delEpMap[k] = v - } - - for _, lEp := range epl { - if _, ok := nw.localEps[lEp.ID()]; ok { - continue - } - - if ep, ok := nw.remoteEps[lEp.ID()]; ok { - // On a container rename EP ID will remain - // the same but the name will change. service - // records should reflect the change. 
- // Keep old EP entry in the delEpMap and add - // EP from the store (which has the new name) - // into the new list - if lEp.name == ep.name { - delete(delEpMap, lEp.ID()) - continue - } - renameEpMap[lEp.ID()] = true - } - nw.remoteEps[lEp.ID()] = lEp - addEp = append(addEp, lEp) - } - - // EPs whose name are to be deleted from the svc records - // should also be removed from nw's remote EP list, except - // the ones that are getting renamed. - for _, lEp := range delEpMap { - if !renameEpMap[lEp.ID()] { - delete(nw.remoteEps, lEp.ID()) - } - } - c.Unlock() - - for _, lEp := range delEpMap { - ep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), false) - - } - for _, lEp := range addEp { - ep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), true) - } - } - } -} - -func (c *controller) processEndpointCreate(nmap map[string]*netWatch, ep *endpoint) { - c.Lock() - nw, ok := nmap[ep.getNetwork().ID()] - c.Unlock() - - if ok { - // Update the svc db for the local endpoint join right away - ep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), true) - - c.Lock() - nw.localEps[ep.ID()] = ep - - // If we had learned that from the kv store remove it - // from remote ep list now that we know that this is - // indeed a local endpoint - delete(nw.remoteEps, ep.ID()) - c.Unlock() - return - } - - nw = &netWatch{ - localEps: make(map[string]*endpoint), - remoteEps: make(map[string]*endpoint), - } - - // Update the svc db for the local endpoint join right away - // Do this before adding this ep to localEps so that we don't - // try to update this ep's container's svc records - ep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), true) - - c.Lock() - nw.localEps[ep.ID()] = ep - nmap[ep.getNetwork().ID()] = nw - nw.stopCh = make(chan struct{}) - c.Unlock() - - store := c.getStore(ep.getNetwork().DataScope()) - if store == nil { - return - } - - if !store.Watchable() { - return - } - - ch, err := store.Watch(ep.getNetwork().getEpCnt(), nw.stopCh) - if err != nil { - log.Warnf("Error creating watch for network: %v", err) - return - } - - go c.networkWatchLoop(nw, ep, ch) -} - -func (c *controller) processEndpointDelete(nmap map[string]*netWatch, ep *endpoint) { - c.Lock() - nw, ok := nmap[ep.getNetwork().ID()] - - if ok { - delete(nw.localEps, ep.ID()) - c.Unlock() - - // Update the svc db about local endpoint leave right away - // Do this after we remove this ep from localEps so that we - // don't try to remove this svc record from this ep's container. - ep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), false) - - c.Lock() - if len(nw.localEps) == 0 { - close(nw.stopCh) - - // This is the last container going away for the network. 
Destroy - // this network's svc db entry - delete(c.svcDb, ep.getNetwork().ID()) - - delete(nmap, ep.getNetwork().ID()) - } - } - c.Unlock() -} - -func (c *controller) watchLoop() { - for { - select { - case ep := <-c.watchCh: - c.processEndpointCreate(c.nmap, ep) - case ep := <-c.unWatchCh: - c.processEndpointDelete(c.nmap, ep) - } - } -} - -func (c *controller) startWatch() { - c.watchCh = make(chan *endpoint) - c.unWatchCh = make(chan *endpoint) - c.nmap = make(map[string]*netWatch) - - go c.watchLoop() -} diff --git a/vendor/github.com/docker/libnetwork/store_test.go b/vendor/github.com/docker/libnetwork/store_test.go deleted file mode 100644 index b7360ae7..00000000 --- a/vendor/github.com/docker/libnetwork/store_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package libnetwork - -import ( - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/docker/libkv/store" - "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" -) - -func TestZooKeeperBackend(t *testing.T) { - c, err := testNewController(t, "zk", "127.0.0.1:2181/custom_prefix") - if err != nil { - t.Fatal(err) - } - c.Stop() -} - -func testNewController(t *testing.T, provider, url string) (NetworkController, error) { - cfgOptions, err := OptionBoltdbWithRandomDBFile() - if err != nil { - return nil, err - } - cfgOptions = append(cfgOptions, config.OptionKVProvider(provider)) - cfgOptions = append(cfgOptions, config.OptionKVProviderURL(url)) - return New(cfgOptions...) -} - -func TestBoltdbBackend(t *testing.T) { - defer os.Remove(datastore.DefaultScopes("")[datastore.LocalScope].Client.Address) - testLocalBackend(t, "", "", nil) - defer os.Remove("/tmp/boltdb.db") - config := &store.Config{Bucket: "testBackend"} - testLocalBackend(t, "boltdb", "/tmp/boltdb.db", config) - -} - -func testLocalBackend(t *testing.T, provider, url string, storeConfig *store.Config) { - cfgOptions := []config.Option{} - cfgOptions = append(cfgOptions, config.OptionLocalKVProvider(provider)) - cfgOptions = append(cfgOptions, config.OptionLocalKVProviderURL(url)) - cfgOptions = append(cfgOptions, config.OptionLocalKVProviderConfig(storeConfig)) - - driverOptions := options.Generic{} - genericOption := make(map[string]interface{}) - genericOption[netlabel.GenericData] = driverOptions - cfgOptions = append(cfgOptions, config.OptionDriverConfig("host", genericOption)) - - ctrl, err := New(cfgOptions...) - if err != nil { - t.Fatalf("Error new controller: %v", err) - } - nw, err := ctrl.NewNetwork("host", "host") - if err != nil { - t.Fatalf("Error creating default \"host\" network: %v", err) - } - ep, err := nw.CreateEndpoint("newendpoint", []EndpointOption{}...) - if err != nil { - t.Fatalf("Error creating endpoint: %v", err) - } - store := ctrl.(*controller).getStore(datastore.LocalScope).KVStore() - if exists, err := store.Exists(datastore.Key(datastore.NetworkKeyPrefix, string(nw.ID()))); !exists || err != nil { - t.Fatalf("Network key should have been created.") - } - if exists, err := store.Exists(datastore.Key([]string{datastore.EndpointKeyPrefix, string(nw.ID()), string(ep.ID())}...)); !exists || err != nil { - t.Fatalf("Endpoint key should have been created.") - } - store.Close() - - // test restore of local store - ctrl, err = New(cfgOptions...) 
- if err != nil { - t.Fatalf("Error creating controller: %v", err) - } - if _, err = ctrl.NetworkByID(nw.ID()); err != nil { - t.Fatalf("Error getting network %v", err) - } -} - -func TestNoPersist(t *testing.T) { - cfgOptions, err := OptionBoltdbWithRandomDBFile() - if err != nil { - t.Fatalf("Error creating random boltdb file : %v", err) - } - ctrl, err := New(cfgOptions...) - if err != nil { - t.Fatalf("Error new controller: %v", err) - } - nw, err := ctrl.NewNetwork("host", "host", NetworkOptionPersist(false)) - if err != nil { - t.Fatalf("Error creating default \"host\" network: %v", err) - } - ep, err := nw.CreateEndpoint("newendpoint", []EndpointOption{}...) - if err != nil { - t.Fatalf("Error creating endpoint: %v", err) - } - store := ctrl.(*controller).getStore(datastore.LocalScope).KVStore() - if exists, _ := store.Exists(datastore.Key(datastore.NetworkKeyPrefix, string(nw.ID()))); exists { - t.Fatalf("Network with persist=false should not be stored in KV Store") - } - if exists, _ := store.Exists(datastore.Key([]string{datastore.EndpointKeyPrefix, string(nw.ID()), string(ep.ID())}...)); exists { - t.Fatalf("Endpoint in Network with persist=false should not be stored in KV Store") - } - store.Close() -} - -// OptionBoltdbWithRandomDBFile function returns a random dir for local store backend -func OptionBoltdbWithRandomDBFile() ([]config.Option, error) { - tmp, err := ioutil.TempFile("", "libnetwork-") - if err != nil { - return nil, fmt.Errorf("Error creating temp file: %v", err) - } - if err := tmp.Close(); err != nil { - return nil, fmt.Errorf("Error closing temp file: %v", err) - } - cfgOptions := []config.Option{} - cfgOptions = append(cfgOptions, config.OptionLocalKVProvider("boltdb")) - cfgOptions = append(cfgOptions, config.OptionLocalKVProviderURL(tmp.Name())) - sCfg := &store.Config{Bucket: "testBackend"} - cfgOptions = append(cfgOptions, config.OptionLocalKVProviderConfig(sCfg)) - return cfgOptions, nil -} - -func TestMultipleControllersWithSameStore(t *testing.T) { - cfgOptions, err := OptionBoltdbWithRandomDBFile() - if err != nil { - t.Fatalf("Error getting random boltdb configs %v", err) - } - ctrl1, err := New(cfgOptions...) - if err != nil { - t.Fatalf("Error new controller: %v", err) - } - defer ctrl1.Stop() - // Use the same boltdb file without closing the previous controller - _, err = New(cfgOptions...) - if err != nil { - t.Fatalf("Local store must support concurrent controllers") - } -} diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 00000000..05be0f8a --- /dev/null +++ b/vendor/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! 
diff --git a/vendor/github.com/docker/libcontainer/LICENSE b/vendor/github.com/docker/libtrust/LICENSE similarity index 100% rename from vendor/github.com/docker/libcontainer/LICENSE rename to vendor/github.com/docker/libtrust/LICENSE diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS new file mode 100644 index 00000000..9768175f --- /dev/null +++ b/vendor/github.com/docker/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md new file mode 100644 index 00000000..8e7db381 --- /dev/null +++ b/vendor/github.com/docker/libtrust/README.md @@ -0,0 +1,18 @@ +# libtrust +
+Libtrust is a library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control is managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative Commons. + diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go new file mode 100644 index 00000000..3dcca33c --- /dev/null +++ b/vendor/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object.
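+ // For illustration, a template for a TLS server certificate could be + // requested with (values hypothetical): + // info := &certTemplateInfo{commonName: "key-id", domains: []string{"example.com"}, serverAuth: true} + // which, below, leaves keyUsage at zero and appends x509.ExtKeyUsageServerAuth.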
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
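+// +// A usage sketch (path hypothetical, error handling elided): +// +// certs, err := LoadCertificateBundle("/etc/ssl/certs/bundle.pem") +// if err != nil { /* unreadable file or a non-CERTIFICATE PEM block */ } +// fmt.Printf("loaded %d certificates\n", len(certs))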
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 00000000..ec5d2159 --- /dev/null +++ b/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 00000000..00bbe4b3 --- /dev/null +++ b/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. 
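+// The format is a base32 encoding of a 240-bit hash of the public key data +// divided into 12 colon-separated groups, for example: +// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP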
+func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. 
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature +// otherwise the the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. 
+ // The given hashId is only a suggestion, and since EC keys only support + // on signature/hash algorithm given the curve name, we disregard it for + // the elliptic curve JWK signature implementation. + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + rBytes, sBytes := r.Bytes(), s.Bytes() + octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output + rBuf := make([]byte, octetLength-len(rBytes), octetLength) + sBuf := make([]byte, octetLength-len(sBytes), octetLength) + + rBuf = append(rBuf, rBytes...) + sBuf = append(sBuf, sBytes...) + + signature = append(rBuf, sBuf...) + alg = k.signatureAlgorithm.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *ecPrivateKey) toMap() map[string]interface{} { + jwk := k.ecPublicKey.toMap() + + dBytes := k.D.Bytes() + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := k.ecPublicKey.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + // Create a buffer with the necessary zero-padding. + dBuf := make([]byte, octetLength-len(dBytes), octetLength) + dBuf = append(dBuf, dBytes...) + + jwk["d"] = joseBase64UrlEncode(dBuf) + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) + } + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) +} + +func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { + dB64Url, err := stringFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key: %s", err) + } + + // JWK key type (kty) has already been determined to be "EC". + // Need to extract the public key information, then extract the private + // key value 'd'. 
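+ // For reference, such a JWK has the shape (values illustrative): + // {"kty": "EC", "crv": "P-256", "x": "...", "y": "...", "d": "..."}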
+ publicKey, err := ecPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + d, err := parseECPrivateParam(dB64Url, publicKey.Curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) + } + + key := &ecPrivateKey{ + ecPublicKey: *publicKey, + PrivateKey: &ecdsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: d, + }, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { + k = new(ecPrivateKey) + k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. +func GenerateECP256PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-256 key: %s", err) + } + + k.curveName = "P-256" + k.signatureAlgorithm = es256 + + return k, nil +} + +// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. +func GenerateECP384PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P384()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-384 key: %s", err) + } + + k.curveName = "P-384" + k.signatureAlgorithm = es384 + + return k, nil +} + +// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521. +func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go new file mode 100644 index 00000000..5b2b4fca --- /dev/null +++ b/vendor/github.com/docker/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned.
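+// +// A usage sketch (pattern and host hypothetical): +// +// key.AddExtendedField("hosts", []string{"*.example.com"}) +// matched, err := FilterByHosts([]PublicKey{key}, "registry.example.com", false) +// // matched now contains key, because the pattern matches the host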
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + } + + return filtered, nil +} diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go new file mode 100644 index 00000000..a2df787d --- /dev/null +++ b/vendor/github.com/docker/libtrust/hash.go @@ -0,0 +1,56 @@ +package libtrust + +import ( + "crypto" + _ "crypto/sha256" // Registrer SHA224 and SHA256 + _ "crypto/sha512" // Registrer SHA384 and SHA512 + "fmt" +) + +type signatureAlgorithm struct { + algHeaderParam string + hashID crypto.Hash +} + +func (h *signatureAlgorithm) HeaderParam() string { + return h.algHeaderParam +} + +func (h *signatureAlgorithm) HashID() crypto.Hash { + return h.hashID +} + +var ( + rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} + rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} + rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} + es256 = &signatureAlgorithm{"ES256", crypto.SHA256} + es384 = &signatureAlgorithm{"ES384", crypto.SHA384} + es512 = &signatureAlgorithm{"ES512", crypto.SHA512} +) + +func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { + switch { + case alg == "RS256": + return rs256, nil + case alg == "RS384": + return rs384, nil + case alg == "RS512": + return rs512, nil + default: + return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) + } +} + +func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { + switch { + case hashID == crypto.SHA512: + return rs512 + case hashID == crypto.SHA384: + return rs384 + case hashID == crypto.SHA256: + fallthrough + default: + return rs256 + } +} diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go new file mode 100644 index 00000000..cb2ca9a7 --- /dev/null +++ b/vendor/github.com/docker/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content. 
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
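+// +// A sketch, assuming leaf was issued for key's public key and chains to +// intermediate: +// +// err := js.SignWithChain(key, []*x509.Certificate{leaf, intermediate})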
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
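+// +// A typical sign-and-serialize flow (sketch, error handling elided): +// +// js, _ := NewJSONSignature(content) +// _ = js.Sign(key) +// jws, _ := js.JWS()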
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
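+// +// For example, a document signed under a top-level "signatures" key could be +// parsed with (sketch): +// +// js, err := ParsePrettySignature(signedBytes, "signatures")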
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
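+// +// Roughly the inverse of ParsePrettySignature above (sketch): +// +// pretty, err := js.PrettySignature("signatures") +// // pretty is the payload with a trailing "signatures" array appended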
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) + } + + js.signatures = merged + return nil +} diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go new file mode 100644 index 00000000..73642db2 --- /dev/null +++ b/vendor/github.com/docker/libtrust/key.go @@ -0,0 +1,253 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PublicKey is a generic interface for a Public Key. +type PublicKey interface { + // KeyType returns the key type for this key. For elliptic curve keys, + // this value should be "EC". For RSA keys, this value should be "RSA". + KeyType() string + // KeyID returns a distinct identifier which is unique to this Public Key. + // The format generated by this library is a base32 encoding of a 240 bit + // hash of the public key data divided into 12 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + KeyID() string + // Verify verifyies the signature of the data in the io.Reader using this + // Public Key. The alg parameter should identify the digital signature + // algorithm which was used to produce the signature and should be + // supported by this public key. Returns a nil error if the signature + // is valid. 
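+ // For example (sketch): + // err := pubKey.Verify(bytes.NewReader(msg), "ES256", sig)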
+ Verify(data io.Reader, alg string, signature []byte) error + // CryptoPublicKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The type + // is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPublicKey() crypto.PublicKey + // These public keys can be serialized to the standard JSON encoding for + // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web + // Algorithms. + MarshalJSON() ([]byte, error) + // These keys can also be serialized to the standard PEM encoding. + PEMBlock() (*pem.Block, error) + // The string representation of a key is its key type and ID. + String() string + AddExtendedField(string, interface{}) + GetExtendedField(string) interface{} +} + +// PrivateKey is a generic interface for a Private Key. +type PrivateKey interface { + // A PrivateKey contains all fields and methods of a PublicKey of the + // same type. The MarshalJSON method also outputs the private key as a + // JSON Web Key, and the PEMBlock method outputs the private key as a + // PEM block. + PublicKey + // PublicKey returns the PublicKey associated with this PrivateKey. + PublicKey() PublicKey + // Sign signs the data read from the io.Reader using a signature algorithm + // supported by the private key. If the specified hashing algorithm is + // supported by this key, that hash function is used to generate the + // signature otherwise the the default hashing algorithm for this key is + // used. Returns the signature and identifier of the algorithm used. + Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) + // CryptoPrivateKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The + // type is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPrivateKey() crypto.PrivateKey +} + +// FromCryptoPublicKey returns a libtrust PublicKey representation of the given +// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { + switch cryptoPublicKey := cryptoPublicKey.(type) { + case *ecdsa.PublicKey: + return fromECPublicKey(cryptoPublicKey) + case *rsa.PublicKey: + return fromRSAPublicKey(cryptoPublicKey), nil + default: + return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) + } +} + +// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given +// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { + switch cryptoPrivateKey := cryptoPrivateKey.(type) { + case *ecdsa.PrivateKey: + return fromECPrivateKey(cryptoPrivateKey) + case *rsa.PrivateKey: + return fromRSAPrivateKey(cryptoPrivateKey), nil + default: + return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) + } +} + +// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust +// PublicKey or an error if there is a problem with the encoding. 
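+// +// A usage sketch (path hypothetical, requires "io/ioutil"): +// +// data, _ := ioutil.ReadFile("/path/to/public.pem") +// pubKey, err := UnmarshalPublicKeyPEM(data)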
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
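Before the JWK set helper below, a short sketch of round-tripping a public key through its PEM encoding with UnmarshalPublicKeyPEM and the PEMBlock method; illustrative only, not part of the vendored file:

package main

import (
	_ "crypto/sha256" // registers SHA-256 (used for key IDs)
	"encoding/pem"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Serialize the public half to a PEM block of type "PUBLIC KEY"...
	block, err := key.PublicKey().PEMBlock()
	if err != nil {
		log.Fatal(err)
	}

	// ...and parse it back; extended PEM headers survive the round trip.
	pub, err := libtrust.UnmarshalPublicKeyPEM(pem.EncodeToMemory(block))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pub.KeyID() == key.KeyID()) // true
}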
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go new file mode 100644 index 00000000..c526de54 --- /dev/null +++ b/vendor/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
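As an aside before the remaining file helpers: the extension-based dispatch in LoadKeyFile pairs with SaveKey (added just below) to round-trip a key through disk. An illustrative sketch, with trust_key.json as a hypothetical path:

package main

import (
	_ "crypto/sha256" // registers SHA-256 (used for key IDs)
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// A .json (or .jwk) suffix selects the JWK encoding; any other
	// suffix is written as PEM. SaveKey uses file mode 0600.
	if err := libtrust.SaveKey("trust_key.json", key); err != nil {
		log.Fatal(err)
	}

	loaded, err := libtrust.LoadKeyFile("trust_key.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(loaded.KeyID() == key.KeyID()) // true
}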
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+ return []json.RawMessage{}, nil + } + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil { + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. + file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) + } + defer file.Close() + + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode trusted key: %s", err) + } + + _, err = file.Write(pem.EncodeToMemory(pemBlock)) + if err != nil { + return fmt.Errorf("unable to write trusted keys file: %s", err) + } + + return nil +} diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go new file mode 100644 index 00000000..9a98ae35 --- /dev/null +++ b/vendor/github.com/docker/libtrust/key_manager.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" +) + +// ClientKeyManager manages client keys on the filesystem +type ClientKeyManager struct { + key PrivateKey + clientFile string + clientDir string + + clientLock sync.RWMutex + clients []PublicKey + + configLock sync.Mutex + configs []*tls.Config +} + +// NewClientKeyManager loads a new manager from a set of key files, +// managed by the given private key.
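Before the manager implementation below, an illustrative sketch of the key-set helpers above (not part of the vendored file); authorized_keys.json is a hypothetical path, and the set file is created on first use:

package main

import (
	_ "crypto/sha256" // registers SHA-256 (used for key IDs)
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Append the public half to a JWK set; a non-.json/.jwk path
	// would be treated as a PEM bundle instead.
	if err := libtrust.AddKeySetFile("authorized_keys.json", key.PublicKey()); err != nil {
		log.Fatal(err)
	}

	keys, err := libtrust.LoadKeySetFile("authorized_keys.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(keys), "trusted key(s) on file")
}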
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to the manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA?
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go new file mode 100644 index 00000000..dac4cacf --- /dev/null +++ b/vendor/github.com/docker/libtrust/rsa_key.go @@ -0,0 +1,427 @@ +package libtrust + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * RSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given data; return a non-nil error if invalid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by +// this key, that hash function is used to generate the signature otherwise +// the default hashing algorithm for this key is used. Returns the signature +// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", +// "RS512". +func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) + hasher := sigAlg.HashID().New() + + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + alg = sigAlg.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PrivateKey for use with other standard library operations. The type +// is either *rsa.PrivateKey or *ecdsa.PrivateKey +func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *rsaPrivateKey) toMap() map[string]interface{} { + k.Precompute() // Make sure the precomputed values are stored. + jwk := k.rsaPublicKey.toMap() + + jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) + jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) + jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) + jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) + jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) + jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) + + otherPrimes := k.Primes[2:] + + if len(otherPrimes) > 0 { + otherPrimesInfo := make([]interface{}, len(otherPrimes)) + for i, r := range otherPrimes { + otherPrimeInfo := make(map[string]string, 3) + otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) + crtVal := k.Precomputed.CRTValues[i] + otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) + otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) + otherPrimesInfo[i] = otherPrimeInfo + } + jwk["oth"] = otherPrimesInfo + } + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format. +func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) +} + +func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { + // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that + // only the private key exponent 'd' is REQUIRED, the others are just for + // signature/decryption optimizations and SHOULD be included when the JWK + // is produced. We MAY choose to accept a JWK which only includes 'd', but + // this implementation does not accept it without the extra fields. Only + // the 'oth' field will be optional (for multi-prime keys).
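Before the unmarshaling implementation below, an illustrative round trip through the JWK private-key encoding described above (not part of the vendored file):

package main

import (
	_ "crypto/sha256" // registers SHA-256 (used for key IDs)
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// MarshalJSON emits the JWK form, including the private parameters
	// "d", "p", "q", "dp", "dq" and "qi" described above.
	jwk, err := json.Marshal(key)
	if err != nil {
		log.Fatal(err)
	}

	restored, err := libtrust.UnmarshalPrivateKeyJWK(jwk)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(restored.KeyID() == key.KeyID()) // true
}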
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") + } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. + publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: privateExponent, + Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp: firstFactorCRT, + Dq: secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. + otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + crtValue := &crtValues[i] // pointer into the slice so the values are retained + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey: privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go new file mode 100644 index 00000000..d88176cc --- /dev/null +++ b/vendor/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
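Before the TLS client-config function below, an illustrative sketch of LoadOrCreateTrustKey above (not part of the vendored file); the path is hypothetical, and the public half is written alongside as public-key.json:

package main

import (
	_ "crypto/sha256" // registers SHA-256 (used for key IDs)
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// First call generates ./trust/key.json (plus ./trust/public-key.json);
	// later calls load the same key back.
	key, err := libtrust.LoadOrCreateTrustKey("./trust/key.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("trust key:", key.KeyID())
}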
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
+ // E is supposed to be 65537 for performance and security reasons + // and is what golang's rsa package generates, but it might be + // different if imported from some other generator. + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(e)) + var i int + for i = 0; i < 3; i++ { // stop at the last octet so at least one byte is always kept + if buf[i] != 0 { + break + } + } + return buf[i:] +} + +func parseRSAPublicExponentParam(eB64Url string) (int, error) { + eBytes, err := joseBase64UrlDecode(eB64Url) + if err != nil { + return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + // Only the minimum number of bytes were used to represent E, but + // binary.BigEndian.Uint32 expects at least 4 bytes, so we need + // to add zero padding if necessary. + byteLen := len(eBytes) + buf := make([]byte, 4-byteLen, 4) + eBytes = append(buf, eBytes...) + + return int(binary.BigEndian.Uint32(eBytes)), nil +} + +func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { + b64Url, err := stringFromMap(m, key) + if err != nil { + return nil, err + } + + paramBytes, err := joseBase64UrlDecode(b64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(paramBytes), nil +} + +func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { + pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} + for k, v := range headers { + switch val := v.(type) { + case string: + pemBlock.Headers[k] = val + case []string: + if k == "hosts" { + pemBlock.Headers[k] = strings.Join(val, ",") + } else { + // Return error, non-encodable type + } + default: + // Return error, non-encodable type + } + } + + return pemBlock, nil +} + +func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { + cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) + } + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + return nil, err + } + + addPEMHeadersToKey(pemBlock, pubKey) + + return pubKey, nil +} + +func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { + for key, value := range pemBlock.Headers { + var safeVal interface{} + if key == "hosts" { + safeVal = strings.Split(value, ",") + } else { + safeVal = value + } + pubKey.AddExtendedField(key, safeVal) + } +} diff --git a/vendor/github.com/docker/machine/log/log_test.go b/vendor/github.com/docker/machine/log/log_test.go deleted file mode 100644 index 349ffe6e..00000000 --- a/vendor/github.com/docker/machine/log/log_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package log - -import "testing" - -func TestTerminalLoggerWithFields(t *testing.T) { - logger := TerminalLogger{} - withFieldsLogger := logger.WithFields(Fields{ - "foo": "bar", - "spam": "eggs", - }) - withFieldsTerminalLogger, ok := withFieldsLogger.(TerminalLogger) - if !ok { - t.Fatal("Type assertion to TerminalLogger failed") - } - expectedOutFields := "\t\t foo=bar spam=eggs" - if withFieldsTerminalLogger.fieldOut != expectedOutFields { - t.Fatalf("Expected %q, got %q", expectedOutFields, withFieldsTerminalLogger.fieldOut) - } -} diff --git a/vendor/github.com/docker/machine/main.go b/vendor/github.com/docker/machine/main.go deleted file mode 100644 index a37bf1d9..00000000 --- a/vendor/github.com/docker/machine/main.go +++ /dev/null @@ -1,125 +0,0 @@ -package main - -import ( - "os" - "path" - - "github.com/codegangsta/cli" - -
"github.com/docker/machine/commands" - "github.com/docker/machine/log" - "github.com/docker/machine/ssh" - "github.com/docker/machine/utils" - "github.com/docker/machine/version" -) - -var AppHelpTemplate = `Usage: {{.Name}} {{if .Flags}}[OPTIONS] {{end}}COMMAND [arg...] - -{{.Usage}} - -Version: {{.Version}}{{if or .Author .Email}} - -Author:{{if .Author}} - {{.Author}}{{if .Email}} - <{{.Email}}>{{end}}{{else}} - {{.Email}}{{end}}{{end}} -{{if .Flags}} -Options: - {{range .Flags}}{{.}} - {{end}}{{end}} -Commands: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} - {{end}} -Run '{{.Name}} COMMAND --help' for more information on a command. -` - -var CommandHelpTemplate = `Usage: docker-machine {{.Name}}{{if .Flags}} [OPTIONS]{{end}} [arg...] - -{{.Usage}}{{if .Description}} - -Description: - {{.Description}}{{end}}{{if .Flags}} - -Options: - {{range .Flags}} - {{.}}{{end}}{{ end }} -` - -func main() { - for _, f := range os.Args { - if f == "-D" || f == "--debug" || f == "-debug" { - os.Setenv("DEBUG", "1") - } - } - - cli.AppHelpTemplate = AppHelpTemplate - cli.CommandHelpTemplate = CommandHelpTemplate - app := cli.NewApp() - app.Name = path.Base(os.Args[0]) - app.Author = "Docker Machine Contributors" - app.Email = "https://github.com/docker/machine" - app.Before = func(c *cli.Context) error { - os.Setenv("MACHINE_STORAGE_PATH", c.GlobalString("storage-path")) - if c.GlobalBool("native-ssh") { - ssh.SetDefaultClient(ssh.Native) - } - return nil - } - app.Commands = commands.Commands - app.CommandNotFound = cmdNotFound - app.Usage = "Create and manage machines running Docker." - app.Version = version.VERSION + " (" + version.GITCOMMIT + ")" - - app.Flags = []cli.Flag{ - cli.BoolFlag{ - Name: "debug, D", - Usage: "Enable debug mode", - }, - cli.StringFlag{ - EnvVar: "MACHINE_STORAGE_PATH", - Name: "s, storage-path", - Value: utils.GetBaseDir(), - Usage: "Configures storage path", - }, - cli.StringFlag{ - EnvVar: "MACHINE_TLS_CA_CERT", - Name: "tls-ca-cert", - Usage: "CA to verify remotes against", - Value: "", - }, - cli.StringFlag{ - EnvVar: "MACHINE_TLS_CA_KEY", - Name: "tls-ca-key", - Usage: "Private key to generate certificates", - Value: "", - }, - cli.StringFlag{ - EnvVar: "MACHINE_TLS_CLIENT_CERT", - Name: "tls-client-cert", - Usage: "Client cert to use for TLS", - Value: "", - }, - cli.StringFlag{ - EnvVar: "MACHINE_TLS_CLIENT_KEY", - Name: "tls-client-key", - Usage: "Private key used in client TLS auth", - Value: "", - }, - cli.BoolFlag{ - EnvVar: "MACHINE_NATIVE_SSH", - Name: "native-ssh", - Usage: "Use the native (Go-based) SSH implementation.", - }, - } - - app.Run(os.Args) -} - -func cmdNotFound(c *cli.Context, command string) { - log.Fatalf( - "%s: '%s' is not a %s command. 
See '%s --help'.", - c.App.Name, - command, - c.App.Name, - c.App.Name, - ) -} diff --git a/vendor/github.com/docker/machine/main_test.go b/vendor/github.com/docker/machine/main_test.go deleted file mode 100644 index 06ab7d0f..00000000 --- a/vendor/github.com/docker/machine/main_test.go +++ /dev/null @@ -1 +0,0 @@ -package main diff --git a/vendor/github.com/docker/machine/utils/b2d_test.go b/vendor/github.com/docker/machine/utils/b2d_test.go deleted file mode 100644 index e780a16e..00000000 --- a/vendor/github.com/docker/machine/utils/b2d_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package utils - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "path/filepath" - "testing" -) - -func TestGetLatestBoot2DockerReleaseUrl(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - respText := `[{"tag_name": "0.2", "prerelease": true, "tag_name": "0.1", "prerelease": false}]` - w.Write([]byte(respText)) - })) - defer ts.Close() - - b := NewB2dUtils(ts.URL, ts.URL, "virtualbox") - isoUrl, err := b.GetLatestBoot2DockerReleaseURL() - if err != nil { - t.Fatal(err) - } - - // TODO: update to release URL once we get the releases worked - // out for b2d-ng - //expectedUrl := fmt.Sprintf("%s/boot2docker/boot2docker/releases/download/0.1/boot2docker.iso", ts.URL) - //if isoUrl != expectedUrl { - // t.Fatalf("expected url %s; received %s", isoUrl) - //} - - if isoUrl == "" { - t.Fatalf("expected a url for the iso") - } -} - -func TestDownloadIso(t *testing.T) { - testData := "test-download" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(testData)) - })) - defer ts.Close() - - filename := "test" - - tmpDir, err := ioutil.TempDir("", "machine-test-") - if err != nil { - t.Fatal(err) - } - - b := NewB2dUtils(ts.URL, ts.URL, "") - if err := b.DownloadISO(tmpDir, filename, ts.URL); err != nil { - t.Fatal(err) - } - - data, err := ioutil.ReadFile(filepath.Join(tmpDir, filename)) - if err != nil { - t.Fatal(err) - } - - if string(data) != testData { - t.Fatalf("expected data \"%s\"; received \"%s\"", testData, string(data)) - } -} diff --git a/vendor/github.com/docker/machine/utils/certs_test.go b/vendor/github.com/docker/machine/utils/certs_test.go deleted file mode 100644 index 1201e63d..00000000 --- a/vendor/github.com/docker/machine/utils/certs_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package utils - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestGenerateCACertificate(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "machine-test-") - if err != nil { - t.Fatal(err) - } - // cleanup - defer os.RemoveAll(tmpDir) - - os.Setenv("MACHINE_DIR", tmpDir) - caCertPath := filepath.Join(tmpDir, "ca.pem") - caKeyPath := filepath.Join(tmpDir, "key.pem") - testOrg := "test-org" - bits := 2048 - if err := GenerateCACertificate(caCertPath, caKeyPath, testOrg, bits); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(caCertPath); err != nil { - t.Fatal(err) - } - if _, err := os.Stat(caKeyPath); err != nil { - t.Fatal(err) - } - os.Setenv("MACHINE_DIR", "") -} - -func TestGenerateCert(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "machine-test-") - if err != nil { - t.Fatal(err) - } - // cleanup - defer os.RemoveAll(tmpDir) - - os.Setenv("MACHINE_DIR", tmpDir) - caCertPath := filepath.Join(tmpDir, "ca.pem") - caKeyPath := filepath.Join(tmpDir, "key.pem") - certPath := filepath.Join(tmpDir, "cert.pem") - keyPath := filepath.Join(tmpDir, "cert-key.pem") - testOrg := 
"test-org" - bits := 2048 - if err := GenerateCACertificate(caCertPath, caKeyPath, testOrg, bits); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(caCertPath); err != nil { - t.Fatal(err) - } - if _, err := os.Stat(caKeyPath); err != nil { - t.Fatal(err) - } - os.Setenv("MACHINE_DIR", "") - - if err := GenerateCert([]string{}, certPath, keyPath, caCertPath, caKeyPath, testOrg, bits); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(certPath); err != nil { - t.Fatalf("certificate not created at %s", certPath) - } - - if _, err := os.Stat(keyPath); err != nil { - t.Fatalf("key not created at %s", keyPath) - } -} diff --git a/vendor/github.com/docker/machine/utils/utils_test.go b/vendor/github.com/docker/machine/utils/utils_test.go deleted file mode 100644 index 1b9f4c6c..00000000 --- a/vendor/github.com/docker/machine/utils/utils_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package utils - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "testing" -) - -func TestGetBaseDir(t *testing.T) { - // reset any override env var - homeDir := GetHomeDir() - baseDir := GetBaseDir() - - if strings.Index(baseDir, homeDir) != 0 { - t.Fatalf("expected base dir with prefix %s; received %s", homeDir, baseDir) - } -} - -func TestGetCustomBaseDir(t *testing.T) { - root := "/tmp" - os.Setenv("MACHINE_STORAGE_PATH", root) - baseDir := GetBaseDir() - - if strings.Index(baseDir, root) != 0 { - t.Fatalf("expected base dir with prefix %s; received %s", root, baseDir) - } - os.Setenv("MACHINE_STORAGE_PATH", "") -} - -func TestGetDockerDir(t *testing.T) { - homeDir := GetHomeDir() - baseDir := GetBaseDir() - - if strings.Index(baseDir, homeDir) != 0 { - t.Fatalf("expected base dir with prefix %s; received %s", homeDir, baseDir) - } -} - -func TestGetMachineDir(t *testing.T) { - root := "/tmp" - os.Setenv("MACHINE_STORAGE_PATH", root) - machineDir := GetMachineDir() - - if strings.Index(machineDir, root) != 0 { - t.Fatalf("expected machine dir with prefix %s; received %s", root, machineDir) - } - - path, filename := path.Split(machineDir) - if strings.Index(path, root) != 0 { - t.Fatalf("expected base path of %s; received %s", root, path) - } - if filename != "machines" { - t.Fatalf("expected machine dir \"machines\"; received %s", filename) - } - os.Setenv("MACHINE_STORAGE_PATH", "") -} - -func TestGetMachineCertDir(t *testing.T) { - root := "/tmp" - os.Setenv("MACHINE_STORAGE_PATH", root) - clientDir := GetMachineCertDir() - - if strings.Index(clientDir, root) != 0 { - t.Fatalf("expected machine client cert dir with prefix %s; received %s", root, clientDir) - } - - path, filename := path.Split(clientDir) - if strings.Index(path, root) != 0 { - t.Fatalf("expected base path of %s; received %s", root, path) - } - if filename != "certs" { - t.Fatalf("expected machine client dir \"certs\"; received %s", filename) - } - os.Setenv("MACHINE_STORAGE_PATH", "") -} - -func TestCopyFile(t *testing.T) { - testStr := "test-machine" - - srcFile, err := ioutil.TempFile("", "machine-test-") - if err != nil { - t.Fatal(err) - } - srcFi, err := srcFile.Stat() - if err != nil { - t.Fatal(err) - } - - srcFile.Write([]byte(testStr)) - srcFile.Close() - - srcFilePath := filepath.Join(os.TempDir(), srcFi.Name()) - - destFile, err := ioutil.TempFile("", "machine-copy-test-") - if err != nil { - t.Fatal(err) - } - - destFi, err := destFile.Stat() - if err != nil { - t.Fatal(err) - } - - destFile.Close() - - destFilePath := filepath.Join(os.TempDir(), destFi.Name()) - - if err := 
CopyFile(srcFilePath, destFilePath); err != nil { - t.Fatal(err) - } - - data, err := ioutil.ReadFile(destFilePath) - if err != nil { - t.Fatal(err) - } - - if string(data) != testStr { - t.Fatalf("expected data \"%s\"; received \"%\"", testStr, string(data)) - } -} - -func TestGetUsername(t *testing.T) { - currentUser := "unknown" - switch runtime.GOOS { - case "darwin", "linux": - currentUser = os.Getenv("USER") - case "windows": - currentUser = os.Getenv("USERNAME") - } - - username := GetUsername() - if username != currentUser { - t.Fatalf("expected username %s; received %s", currentUser, username) - } -} diff --git a/vendor/github.com/flynn/go-shlex/shlex_test.go b/vendor/github.com/flynn/go-shlex/shlex_test.go deleted file mode 100644 index 7551f7c5..00000000 --- a/vendor/github.com/flynn/go-shlex/shlex_test.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2012 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shlex - -import ( - "strings" - "testing" -) - -func checkError(err error, t *testing.T) { - if err != nil { - t.Error(err) - } -} - -func TestClassifier(t *testing.T) { - classifier := NewDefaultClassifier() - runeTests := map[int32]RuneTokenType{ - 'a': RUNETOKEN_CHAR, - ' ': RUNETOKEN_SPACE, - '"': RUNETOKEN_ESCAPING_QUOTE, - '\'': RUNETOKEN_NONESCAPING_QUOTE, - '#': RUNETOKEN_COMMENT} - for rune, expectedType := range runeTests { - foundType := classifier.ClassifyRune(rune) - if foundType != expectedType { - t.Logf("Expected type: %v for rune '%c'(%v). Found type: %v.", expectedType, rune, rune, foundType) - t.Fail() - } - } -} - -func TestTokenizer(t *testing.T) { - testInput := strings.NewReader("one two \"three four\" \"five \\\"six\\\"\" seven#eight # nine # ten\n eleven") - expectedTokens := []*Token{ - &Token{ - tokenType: TOKEN_WORD, - value: "one"}, - &Token{ - tokenType: TOKEN_WORD, - value: "two"}, - &Token{ - tokenType: TOKEN_WORD, - value: "three four"}, - &Token{ - tokenType: TOKEN_WORD, - value: "five \"six\""}, - &Token{ - tokenType: TOKEN_WORD, - value: "seven#eight"}, - &Token{ - tokenType: TOKEN_COMMENT, - value: " nine # ten"}, - &Token{ - tokenType: TOKEN_WORD, - value: "eleven"}} - - tokenizer, err := NewTokenizer(testInput) - checkError(err, t) - for _, expectedToken := range expectedTokens { - foundToken, err := tokenizer.NextToken() - checkError(err, t) - if !foundToken.Equal(expectedToken) { - t.Error("Expected token:", expectedToken, ". Found:", foundToken) - } - } -} - -func TestLexer(t *testing.T) { - testInput := strings.NewReader("one") - expectedWord := "one" - lexer, err := NewLexer(testInput) - checkError(err, t) - foundWord, err := lexer.NextWord() - checkError(err, t) - if expectedWord != foundWord { - t.Error("Expected word:", expectedWord, ". 
Found:", foundWord) - } -} - -func TestSplitSimple(t *testing.T) { - testInput := "one two three" - expectedOutput := []string{"one", "two", "three"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error:", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } -} - -func TestSplitEscapingQuotes(t *testing.T) { - testInput := "one \"два ${three}\" four" - expectedOutput := []string{"one", "два ${three}", "four"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error:", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } -} - -func TestGlobbingExpressions(t *testing.T) { - testInput := "onefile *file one?ile onefil[de]" - expectedOutput := []string{"onefile", "*file", "one?ile", "onefil[de]"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } - -} - -func TestSplitNonEscapingQuotes(t *testing.T) { - testInput := "one 'два ${three}' four" - expectedOutput := []string{"one", "два ${three}", "four"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error:", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. 
Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore deleted file mode 100644 index 5f6b48ea..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# temporary symlink for testing -testing/data/symlink diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml deleted file mode 100644 index de36cd45..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -sudo: required -go: - - 1.3.3 - - 1.4.2 - - 1.5.1 - - tip -env: - - GOARCH=amd64 DOCKER_VERSION=1.7.1 - - GOARCH=386 DOCKER_VERSION=1.7.1 - - GOARCH=amd64 DOCKER_VERSION=1.8.3 - - GOARCH=386 DOCKER_VERSION=1.8.3 - - GOARCH=amd64 DOCKER_VERSION=1.9.1 - - GOARCH=386 DOCKER_VERSION=1.9.1 -install: - - make prepare_docker -script: - - make test - - DOCKER_HOST=tcp://127.0.0.1:2375 make integration -services: - - docker diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS deleted file mode 100644 index bb5b042a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ /dev/null @@ -1,114 +0,0 @@ -# This is the official list of go-dockerclient authors for copyright purposes. - -Abhishek Chanda -Adam Bell-Hanssen -Adrien Kohlbecker -Aldrin Leal -Andreas Jaekle -Andrews Medina -Andrey Sibiryov -Andy Goldstein -Artem Sidorenko -Ben Marini -Ben McCann -Brendan Fosberry -Brian Lalor -Brian P. Hamachek -Brian Palmer -Bryan Boreham -Burke Libbey -Carlos Diaz-Padron -Cesar Wong -Cezar Sa Espinola -Cheah Chu Yeow -cheneydeng -Chris Bednarski -CMGS -Craig Jellick -Dan Williams -Daniel, Dao Quang Minh -Daniel Garcia -Daniel Hiltgen -Darren Shepherd -Dave Choi -David Huie -Dawn Chen -Dinesh Subhraveti -Ed -Elias G. 
Schneevoigt -Erez Horev -Eric Anderson -Ewout Prangsma -Fabio Rehm -Fatih Arslan -Flavia Missi -Francisco Souza -Grégoire Delattre -Guillermo Álvarez Fernández -He Simei -Ivan Mikushin -James Bardin -Jari Kolehmainen -Jason Wilder -Jawher Moussa -Jean-Baptiste Dalido -Jeff Mitchell -Jeffrey Hulten -Jen Andre -Jérôme Laurens -Johan Euphrosine -John Hughes -Kamil Domanski -Karan Misra -Kim, Hirokuni -Kyle Allan -Liron Levin -Lior Yankovich -Liu Peng -Lorenz Leutgeb -Lucas Clemente -Lucas Weiblen -Lyon Hill -Mantas Matelis -Martin Sweeney -Máximo Cuadros Ortiz -Michael Schmatz -Michal Fojtik -Mike Dillon -Mrunal Patel -Nick Ethier -Omeid Matten -Orivej Desh -Paul Bellamy -Paul Morie -Paul Weil -Peter Edge -Peter Jihoon Kim -Phil Lu -Philippe Lafoucrière -Rafe Colton -Rob Miller -Robert Williamson -Salvador Gironès -Sam Rijs -Sami Wagiaalla -Samuel Karp -Silas Sewell -Simon Eskildsen -Simon Menke -Skolos -Soulou -Sridhar Ratnakumar -Summer Mousa -Sunjin Lee -Tarsis Azevedo -Tim Schindler -Tobi Knaup -Tonic -ttyh061 -Victor Marmol -Vincenzo Prignano -Wiliam Souza -Ye Yin -Yu, Zou -Yuriy Bogdanov diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE deleted file mode 100644 index 70663447..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE +++ /dev/null @@ -1,6 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -You can find the Docker license at the following link: -https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile deleted file mode 100644 index 205d8f3c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ /dev/null @@ -1,60 +0,0 @@ -.PHONY: \ - all \ - vendor \ - lint \ - vet \ - fmt \ - fmtcheck \ - pretest \ - test \ - integration \ - cov \ - clean - -SRCS = $(shell git ls-files '*.go' | grep -v '^external/') -PKGS = ./. 
./testing - -all: test - -vendor: - @ go get -v github.com/mjibson/party - party -d external -c -u - -lint: - @ go get -v github.com/golang/lint/golint - $(foreach file,$(SRCS),golint $(file) || exit;) - -vet: - @-go get -v golang.org/x/tools/cmd/vet - $(foreach pkg,$(PKGS),go vet $(pkg);) - -fmt: - gofmt -w $(SRCS) - -fmtcheck: - $(foreach file,$(SRCS),gofmt -d $(file);) - -prepare_docker: - sudo stop docker - sudo rm -rf /var/lib/docker - sudo rm -f `which docker` - sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D - echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list - sudo apt-get update - sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes - -pretest: lint vet fmtcheck - -test: pretest - $(foreach pkg,$(PKGS),go test $(pkg) || exit;) - -integration: - go test -tags docker_integration -run TestIntegration -v - -cov: - @ go get -v github.com/axw/gocov/gocov - @ go get golang.org/x/tools/cmd/cover - gocov test | gocov report - -clean: - $(foreach pkg,$(PKGS),go clean $(pkg) || exit;) diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown deleted file mode 100644 index b75a7e92..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/README.markdown +++ /dev/null @@ -1,105 +0,0 @@ -# go-dockerclient - -[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient) -[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) - -This package presents a client for the Docker remote API. It also provides -support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/). - -This package also provides support for docker's network API, which is a simple -passthrough to the libnetwork remote API. Note that docker's network API is -only available in docker 1.8 and above, and only enabled in docker if -DOCKER_EXPERIMENTAL is defined during the docker build process. - -For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/). - -## Vendoring - -If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored, -please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient -is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339) -for details. - -## Example - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "unix:///var/run/docker.sock" - client, _ := docker.NewClient(endpoint) - imgs, _ := client.ListImages(docker.ListImagesOptions{All: false}) - for _, img := range imgs { - fmt.Println("ID: ", img.ID) - fmt.Println("RepoTags: ", img.RepoTags) - fmt.Println("Created: ", img.Created) - fmt.Println("Size: ", img.Size) - fmt.Println("VirtualSize: ", img.VirtualSize) - fmt.Println("ParentId: ", img.ParentID) - } -} -``` - -## Using with TLS - -In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters. 
- -```go -package main - -import ( - "fmt" - "os" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "tcp://[ip]:[port]" - path := os.Getenv("DOCKER_CERT_PATH") - ca := fmt.Sprintf("%s/ca.pem", path) - cert := fmt.Sprintf("%s/cert.pem", path) - key := fmt.Sprintf("%s/key.pem", path) - client, _ := docker.NewTLSClient(endpoint, cert, key, ca) - // use client -} -``` - -If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables -`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv. - - -```go -package main - -import ( - "github.com/fsouza/go-dockerclient" -) - -func main() { - client, _ := docker.NewClientFromEnv() - // use client -} -``` - -See the documentation for more details. - -## Developing - -All development commands can be seen in the [Makefile](Makefile). - -Committed code must pass: - -* [golint](https://github.com/golang/lint) -* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) -* [gofmt](https://golang.org/cmd/gofmt) -* [go test](https://golang.org/cmd/go/#hdr-Test_packages) - -Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository. diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go deleted file mode 100644 index 775c70c0..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path" - "strings" -) - -// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. -var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg") - -// AuthConfiguration represents authentication options to use in the PushImage -// method. It represents the authentication in the Docker index server. -type AuthConfiguration struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Email string `json:"email,omitempty"` - ServerAddress string `json:"serveraddress,omitempty"` -} - -// AuthConfigurations represents authentication options to use for the -// PushImage method accommodating the new X-Registry-Config header -type AuthConfigurations struct { - Configs map[string]AuthConfiguration `json:"configs"` -} - -// AuthConfigurations119 is used to serialize a set of AuthConfigurations -// for Docker API >= 1.19. -type AuthConfigurations119 map[string]AuthConfiguration - -// dockerConfig represents a registry authentication configuration from the -// .dockercfg file. -type dockerConfig struct { - Auth string `json:"auth"` - Email string `json:"email"` -} - -// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the -// ~/.dockercfg file.
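As context for the parsing helpers that follow: each registry entry in a `.dockercfg` stores `base64("username:password")` in its `auth` field, and `authConfigs` splits the decoded value on the first `:` only, so passwords containing colons survive intact. A minimal stdlib-only sketch of that round trip (the sample credentials are illustrative, not taken from this repository):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// A .dockercfg "auth" value is base64("username:password").
	auth := base64.StdEncoding.EncodeToString([]byte("user:sec:ret"))

	data, err := base64.StdEncoding.DecodeString(auth)
	if err != nil {
		panic(err)
	}
	// SplitN with n=2 splits on the first ":" only, matching authConfigs
	// below, so a password that itself contains ":" stays whole.
	userpass := strings.SplitN(string(data), ":", 2)
	fmt.Println(userpass[0]) // user
	fmt.Println(userpass[1]) // sec:ret
}
```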
-func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { - var r io.Reader - var err error - p := path.Join(os.Getenv("HOME"), ".docker", "config.json") - r, err = os.Open(p) - if err != nil { - p := path.Join(os.Getenv("HOME"), ".dockercfg") - r, err = os.Open(p) - if err != nil { - return nil, err - } - } - return NewAuthConfigurations(r) -} - -// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the -// same format as the .dockercfg file. -func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { - var auth *AuthConfigurations - confs, err := parseDockerConfig(r) - if err != nil { - return nil, err - } - auth, err = authConfigs(confs) - if err != nil { - return nil, err - } - return auth, nil -} - -func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { - buf := new(bytes.Buffer) - buf.ReadFrom(r) - byteData := buf.Bytes() - - var confsWrapper map[string]map[string]dockerConfig - if err := json.Unmarshal(byteData, &confsWrapper); err == nil { - if confs, ok := confsWrapper["auths"]; ok { - return confs, nil - } - } - - var confs map[string]dockerConfig - if err := json.Unmarshal(byteData, &confs); err != nil { - return nil, err - } - return confs, nil -} - -// authConfigs converts a dockerConfigs map to a AuthConfigurations object. -func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { - c := &AuthConfigurations{ - Configs: make(map[string]AuthConfiguration), - } - for reg, conf := range confs { - data, err := base64.StdEncoding.DecodeString(conf.Auth) - if err != nil { - return nil, err - } - userpass := strings.SplitN(string(data), ":", 2) - if len(userpass) != 2 { - return nil, ErrCannotParseDockercfg - } - c.Configs[reg] = AuthConfiguration{ - Email: conf.Email, - Username: userpass[0], - Password: userpass[1], - ServerAddress: reg, - } - } - return c, nil -} - -// AuthCheck validates the given credentials. It returns nil if successful. -// -// See https://goo.gl/m2SleN for more details. -func (c *Client) AuthCheck(conf *AuthConfiguration) error { - if conf == nil { - return fmt.Errorf("conf is nil") - } - resp, err := c.do("POST", "/auth", doOptions{data: conf}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/auth_test.go b/vendor/github.com/fsouza/go-dockerclient/auth_test.go deleted file mode 100644 index e53b1760..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/auth_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/base64" - "fmt" - "net/http" - "strings" - "testing" -) - -func TestAuthLegacyConfig(t *testing.T) { - auth := base64.StdEncoding.EncodeToString([]byte("user:pa:ss")) - read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Error(err) - } - c, ok := ac.Configs["docker.io"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") - } - if got, want := c.Email, "user@example.com"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. 
Got %q`, want, got) - } - if got, want := c.Password, "pa:ss"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "docker.io"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got) - } -} - -func TestAuthBadConfig(t *testing.T) { - auth := base64.StdEncoding.EncodeToString([]byte("userpass")) - read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != ErrCannotParseDockercfg { - t.Errorf("Incorrect error returned %v\n", err) - } - if ac != nil { - t.Errorf("Invalid auth configuration returned, should be nil %v\n", ac) - } -} - -func TestAuthConfig(t *testing.T) { - auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) - read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Error(err) - } - c, ok := ac.Configs["docker.io"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") - } - if got, want := c.Email, "user@example.com"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Password, "pass"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "docker.io"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. 
Got %q`, want, got) - } -} - -func TestAuthCheck(t *testing.T) { - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - if err := client.AuthCheck(nil); err == nil { - t.Fatalf("expected error on nil auth config") - } - // test good auth - if err := client.AuthCheck(&AuthConfiguration{}); err != nil { - t.Fatal(err) - } - *fakeRT = FakeRoundTripper{status: http.StatusUnauthorized} - if err := client.AuthCheck(&AuthConfiguration{}); err == nil { - t.Fatal("expected failure from unauthorized auth") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/build_test.go b/vendor/github.com/fsouza/go-dockerclient/build_test.go deleted file mode 100644 index c9640f20..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/build_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package docker - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "os" - "reflect" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" -) - -func TestBuildImageMultipleContextsError(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - InputStream: &buf, - OutputStream: &buf, - ContextDir: "testing/data", - } - err := client.BuildImage(opts) - if err != ErrMultipleContexts { - t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error") - } -} - -func TestBuildImageContextDirDockerignoreParsing(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - - if err := os.Symlink("doesnotexist", "testing/data/symlink"); err != nil { - t.Errorf("error creating symlink on demand: %s", err) - } - defer func() { - if err := os.Remove("testing/data/symlink"); err != nil { - t.Errorf("error removing symlink on demand: %s", err) - } - }() - - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - OutputStream: &buf, - ContextDir: "testing/data", - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - reqBody := fakeRT.requests[0].Body - tmpdir, err := unpackBodyTarball(reqBody) - if err != nil { - t.Fatal(err) - } - - defer func() { - if err := os.RemoveAll(tmpdir); err != nil { - t.Fatal(err) - } - }() - - files, err := ioutil.ReadDir(tmpdir) - if err != nil { - t.Fatal(err) - } - - foundFiles := []string{} - for _, file := range files { - foundFiles = append(foundFiles, file.Name()) - } - - expectedFiles := []string{ - ".dockerignore", - "Dockerfile", - "barfile", - "ca.pem", - "cert.pem", - "key.pem", - "server.pem", - "serverkey.pem", - "symlink", - } - - if !reflect.DeepEqual(expectedFiles, foundFiles) { - t.Errorf( - "BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v", - expectedFiles, foundFiles, - ) - } -} - -func TestBuildImageSendXRegistryConfig(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - OutputStream: &buf, - ContextDir: "testing/data", - AuthConfigs: AuthConfigurations{ - Configs: map[string]AuthConfiguration{ - "quay.io": { - 
Username: "foo", - Password: "bar", - Email: "baz", - ServerAddress: "quay.io", - }, - }, - }, - } - - encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg==" - - if err := client.BuildImage(opts); err != nil { - t.Fatal(err) - } - - xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0] - if xRegistryConfig != encodedConfig { - t.Errorf( - "BuildImage: X-Registry-Config not set currectly: expected %q, got %q", - encodedConfig, - xRegistryConfig, - ) - } -} - -func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) { - tmpdir, err = ioutil.TempDir("", "go-dockerclient-test") - if err != nil { - return - } - err = archive.Untar(req, tmpdir, &archive.TarOptions{ - Compression: archive.Uncompressed, - NoLchown: true, - }) - return -} diff --git a/vendor/github.com/fsouza/go-dockerclient/change.go b/vendor/github.com/fsouza/go-dockerclient/change.go deleted file mode 100644 index d133594d..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/change.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import "fmt" - -// ChangeType is a type for constants indicating the type of change -// in a container -type ChangeType int - -const ( - // ChangeModify is the ChangeType for container modifications - ChangeModify ChangeType = iota - - // ChangeAdd is the ChangeType for additions to a container - ChangeAdd - - // ChangeDelete is the ChangeType for deletions from a container - ChangeDelete -) - -// Change represents a change in a container. -// -// See https://goo.gl/9GsTIF for more details. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/change_test.go b/vendor/github.com/fsouza/go-dockerclient/change_test.go deleted file mode 100644 index 9418b183..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/change_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import "testing" - -func TestChangeString(t *testing.T) { - var tests = []struct { - change Change - expected string - }{ - {Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"}, - {Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"}, - {Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"}, - {Change{"/etc/passwd", 33}, " /etc/passwd"}, - } - for _, tt := range tests { - if got := tt.change.String(); got != tt.expected { - t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go deleted file mode 100644 index a5fabd15..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ /dev/null @@ -1,885 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package docker provides a client for the Docker remote API. -// -// See https://goo.gl/G3plxW for more details on the remote API. -package docker - -import ( - "bufio" - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" - "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" -) - -const userAgent = "go-dockerclient" - -var ( - // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. - ErrInvalidEndpoint = errors.New("invalid endpoint") - - // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. - ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") - - apiVersion112, _ = NewAPIVersion("1.12") - - apiVersion119, _ = NewAPIVersion("1.19") -) - -// APIVersion is an internal representation of a version of the Remote API. -type APIVersion []int - -// NewAPIVersion returns an instance of APIVersion for the given string. -// -// The given string must be in the form <major>.<minor>.<patch>, where <major>, <minor> -// and <patch> are integer numbers. -func NewAPIVersion(input string) (APIVersion, error) { - if !strings.Contains(input, ".") { - return nil, fmt.Errorf("Unable to parse version %q", input) - } - arr := strings.Split(input, ".") - ret := make(APIVersion, len(arr)) - var err error - for i, val := range arr { - ret[i], err = strconv.Atoi(val) - if err != nil { - return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val) - } - } - return ret, nil -} - -func (version APIVersion) String() string { - var str string - for i, val := range version { - str += strconv.Itoa(val) - if i < len(version)-1 { - str += "." - } - } - return str -} - -// LessThan is a function for comparing APIVersion structs -func (version APIVersion) LessThan(other APIVersion) bool { - return version.compare(other) < 0 -} - -// LessThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool { - return version.compare(other) <= 0 -} - -// GreaterThan is a function for comparing APIVersion structs -func (version APIVersion) GreaterThan(other APIVersion) bool { - return version.compare(other) > 0 -} - -// GreaterThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool { - return version.compare(other) >= 0 -} - -func (version APIVersion) compare(other APIVersion) int { - for i, v := range version { - if i <= len(other)-1 { - otherVersion := other[i] - - if v < otherVersion { - return -1 - } else if v > otherVersion { - return 1 - } - } - } - if len(version) > len(other) { - return 1 - } - if len(version) < len(other) { - return -1 - } - return 0 -} - -// Client is the basic type of this package. It provides methods for -// interaction with the API.
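A note on the `APIVersion` comparison logic above: `compare` walks the shared components first and then falls back to length, so a version that is a strict prefix of another (e.g. `1.19` vs `1.19.1`) compares as the lesser one. A small sketch of the exported API, assuming the package is imported as `docker` from its vendored path:

```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	a, _ := docker.NewAPIVersion("1.19")   // APIVersion{1, 19}
	b, _ := docker.NewAPIVersion("1.19.1") // APIVersion{1, 19, 1}

	fmt.Println(a.LessThan(b))             // true: equal prefix, b is longer
	fmt.Println(b.GreaterThanOrEqualTo(a)) // true
	fmt.Println(a.String())                // "1.19"
}
```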
-type Client struct { - SkipServerVersionCheck bool - HTTPClient *http.Client - TLSConfig *tls.Config - Dialer *net.Dialer - - endpoint string - endpointURL *url.URL - eventMonitor *eventMonitoringState - requestedAPIVersion APIVersion - serverAPIVersion APIVersion - expectedAPIVersion APIVersion - unixHTTPClient *http.Client -} - -// NewClient returns a Client instance ready for communication with the given -// server endpoint. It will use the latest remote API version available in the -// server. -func NewClient(endpoint string) (*Client, error) { - client, err := NewVersionedClient(endpoint, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClient returns a Client instance ready for TLS communications with the given -// server endpoint, key and certificates. It will use the latest remote API version -// available in the server. -func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) { - client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file). It will use the latest remote API version available in the server. -func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) { - client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClient returns a Client instance ready for communication with -// the given server endpoint, using a specific remote API version. -func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, false) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - return &Client{ - HTTPClient: cleanhttp.DefaultClient(), - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - }, nil -} - -// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient. -func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString) -} - -// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given -// server endpoint, key and certificates, using a specific remote API version.
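Taken together, the constructors above and below form one family that differs only in transport (plain vs. TLS, file-based vs. in-memory certificates) and in whether the remote API version is pinned. A hedged usage sketch under those assumptions (the endpoint values are placeholders, not part of this diff):

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// Unversioned: NewClient sets SkipServerVersionCheck and talks to
	// whatever API version the daemon offers.
	c1, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	// Pinned: request URLs are prefixed with /v1.12.
	c2, err := docker.NewVersionedClient("tcp://127.0.0.1:2375", "1.12")
	if err != nil {
		log.Fatal(err)
	}

	log.Println(c1.Endpoint(), c2.Endpoint())
}
```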
-func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - certPEMBlock, err := ioutil.ReadFile(cert) - if err != nil { - return nil, err - } - keyPEMBlock, err := ioutil.ReadFile(key) - if err != nil { - return nil, err - } - caPEMCert, err := ioutil.ReadFile(ca) - if err != nil { - return nil, err - } - return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString) -} - -// NewClientFromEnv returns a Client instance ready for communication created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. -func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv("") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH, -// and using a specific remote API version. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. -func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) { - dockerEnv, err := getDockerEnv() - if err != nil { - return nil, err - } - dockerHost := dockerEnv.dockerHost - if dockerEnv.dockerTLSVerify { - parts := strings.SplitN(dockerEnv.dockerHost, "://", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost) - } - cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem") - key := filepath.Join(dockerEnv.dockerCertPath, "key.pem") - ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem") - return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString) - } - return NewVersionedClient(dockerEnv.dockerHost, apiVersionString) -} - -// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file), using a specific remote API version.
-func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, true) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - if certPEMBlock == nil || keyPEMBlock == nil { - return nil, errors.New("Both cert and key are required") - } - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, err - } - tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}} - if caPEMCert == nil { - tlsConfig.InsecureSkipVerify = true - } else { - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caPEMCert) { - return nil, errors.New("Could not add RootCA pem") - } - tlsConfig.RootCAs = caPool - } - tr := cleanhttp.DefaultTransport() - tr.TLSClientConfig = tlsConfig - if err != nil { - return nil, err - } - return &Client{ - HTTPClient: &http.Client{Transport: tr}, - TLSConfig: tlsConfig, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - }, nil -} - -func (c *Client) checkAPIVersion() error { - serverAPIVersionString, err := c.getServerAPIVersionString() - if err != nil { - return err - } - c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString) - if err != nil { - return err - } - if c.requestedAPIVersion == nil { - c.expectedAPIVersion = c.serverAPIVersion - } else { - c.expectedAPIVersion = c.requestedAPIVersion - } - return nil -} - -// Endpoint returns the current endpoint. It's useful for getting the endpoint -// when using functions that get this data from the environment (like -// NewClientFromEnv). -func (c *Client) Endpoint() string { - return c.endpoint -} - -// Ping pings the docker server -// -// See https://goo.gl/kQCfJj for more details.
-func (c *Client) Ping() error { - path := "/_ping" - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return newError(resp) - } - resp.Body.Close() - return nil -} - -func (c *Client) getServerAPIVersionString() (version string, err error) { - resp, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return "", err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode) - } - var versionResponse map[string]interface{} - if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil { - return "", err - } - if version, ok := (versionResponse["ApiVersion"]).(string); ok { - return version, nil - } - return "", nil -} - -type doOptions struct { - data interface{} - forceJSON bool - headers map[string]string -} - -func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) { - var params io.Reader - if doOptions.data != nil || doOptions.forceJSON { - buf, err := json.Marshal(doOptions.data) - if err != nil { - return nil, err - } - params = bytes.NewBuffer(buf) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return nil, err - } - } - httpClient := c.HTTPClient - protocol := c.endpointURL.Scheme - var u string - if protocol == "unix" { - httpClient = c.unixClient() - u = c.getFakeUnixURL(path) - } else { - u = c.getURL(path) - } - req, err := http.NewRequest(method, u, params) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", userAgent) - if doOptions.data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - - for k, v := range doOptions.headers { - req.Header.Set(k, v) - } - resp, err := httpClient.Do(req) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, ErrConnectionRefused - } - return nil, err - } - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, newError(resp) - } - return resp, nil -} - -type streamOptions struct { - setRawTerminal bool - rawJSONStream bool - useJSONDecoder bool - headers map[string]string - in io.Reader - stdout io.Writer - stderr io.Writer - // timeout is the initial connection timeout - timeout time.Duration -} - -func (c *Client) stream(method, path string, streamOptions streamOptions) error { - if (method == "POST" || method == "PUT") && streamOptions.in == nil { - streamOptions.in = bytes.NewReader(nil) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return err - } - } - req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) - if err != nil { - return err - } - req.Header.Set("User-Agent", userAgent) - if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - for key, val := range streamOptions.headers { - req.Header.Set(key, val) - } - var resp *http.Response - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if streamOptions.stdout == nil { - streamOptions.stdout = ioutil.Discard - } - if streamOptions.stderr == nil { - streamOptions.stderr = ioutil.Discard - } - if protocol == "unix" { - dial, err := c.Dialer.Dial(protocol, address) - if err != nil { - return err - } - defer dial.Close() - breader :=
bufio.NewReader(dial) - err = req.Write(dial) - if err != nil { - return err - } - - // ReadResponse may hang if server does not reply - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Now().Add(streamOptions.timeout)) - } - - if resp, err = http.ReadResponse(breader, req); err != nil { - // Cancel timeout for future I/O operations - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Time{}) - } - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - return err - } - } else { - if resp, err = c.HTTPClient.Do(req); err != nil { - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - return err - } - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return newError(resp) - } - if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" { - // if we want to get raw json stream, just copy it back to output - // without decoding it - if streamOptions.rawJSONStream { - _, err = io.Copy(streamOptions.stdout, resp.Body) - return err - } - dec := json.NewDecoder(resp.Body) - for { - var m jsonMessage - if err := dec.Decode(&m); err == io.EOF { - break - } else if err != nil { - return err - } - if m.Stream != "" { - fmt.Fprint(streamOptions.stdout, m.Stream) - } else if m.Progress != "" { - fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress) - } else if m.Error != "" { - return errors.New(m.Error) - } - if m.Status != "" { - fmt.Fprintln(streamOptions.stdout, m.Status) - } - } - } else { - if streamOptions.setRawTerminal { - _, err = io.Copy(streamOptions.stdout, resp.Body) - } else { - _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body) - } - return err - } - return nil -} - -type hijackOptions struct { - success chan struct{} - setRawTerminal bool - in io.Reader - stdout io.Writer - stderr io.Writer - data interface{} -} - -func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error { - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return err - } - } - var params io.Reader - if hijackOptions.data != nil { - buf, err := json.Marshal(hijackOptions.data) - if err != nil { - return err - } - params = bytes.NewBuffer(buf) - } - req, err := http.NewRequest(method, c.getURL(path), params) - if err != nil { - return err - } - req.Header.Set("Content-Type", "plain/text") - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != "unix" { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - if c.TLSConfig != nil && protocol != "unix" { - dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) - if err != nil { - return err - } - } else { - dial, err = c.Dialer.Dial(protocol, address) - if err != nil { - return err - } - } - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - clientconn.Do(req) - if hijackOptions.success != nil { - hijackOptions.success <- struct{}{} - <-hijackOptions.success - } - rwc, br := clientconn.Hijack() - defer rwc.Close() - errChanOut := make(chan error, 1) - errChanIn := make(chan error, 1) - if hijackOptions.stdout == nil && hijackOptions.stderr == nil { - close(errChanOut) - } else { - // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
- // Otherwise, if the only stream you care about is stdin, your attach session - // will "hang" until the container terminates, even though you're not reading - // stdout/stderr - if hijackOptions.stdout == nil { - hijackOptions.stdout = ioutil.Discard - } - if hijackOptions.stderr == nil { - hijackOptions.stderr = ioutil.Discard - } - - go func() { - defer func() { - if hijackOptions.in != nil { - if closer, ok := hijackOptions.in.(io.Closer); ok { - closer.Close() - } - errChanIn <- nil - } - }() - - var err error - if hijackOptions.setRawTerminal { - _, err = io.Copy(hijackOptions.stdout, br) - } else { - _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) - } - errChanOut <- err - }() - } - go func() { - var err error - if hijackOptions.in != nil { - _, err = io.Copy(rwc, hijackOptions.in) - } - errChanIn <- err - rwc.(interface { - CloseWrite() error - }).CloseWrite() - }() - errIn := <-errChanIn - errOut := <-errChanOut - if errIn != nil { - return errIn - } - return errOut -} - -func (c *Client) getURL(path string) string { - urlStr := strings.TrimRight(c.endpointURL.String(), "/") - if c.endpointURL.Scheme == "unix" { - urlStr = "" - } - if c.requestedAPIVersion != nil { - return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) - } - return fmt.Sprintf("%s%s", urlStr, path) -} - -// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX -// domain socket to the given path. -func (c *Client) getFakeUnixURL(path string) string { - u := *c.endpointURL // Copy. - - // Override URL so that net/http will not complain. - u.Scheme = "http" - u.Host = "unix.sock" // Doesn't matter what this is - it's not used. - u.Path = "" - urlStr := strings.TrimRight(u.String(), "/") - if c.requestedAPIVersion != nil { - return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) - } - return fmt.Sprintf("%s%s", urlStr, path) -} - -func (c *Client) unixClient() *http.Client { - if c.unixHTTPClient != nil { - return c.unixHTTPClient - } - socketPath := c.endpointURL.Path - c.unixHTTPClient = &http.Client{ - Transport: &http.Transport{ - Dial: func(network, addr string) (net.Conn, error) { - return c.Dialer.Dial("unix", socketPath) - }, - }, - } - return c.unixHTTPClient -} - -type jsonMessage struct { - Status string `json:"status,omitempty"` - Progress string `json:"progress,omitempty"` - Error string `json:"error,omitempty"` - Stream string `json:"stream,omitempty"` -} - -func queryString(opts interface{}) string { - if opts == nil { - return "" - } - value := reflect.ValueOf(opts) - if value.Kind() == reflect.Ptr { - value = value.Elem() - } - if value.Kind() != reflect.Struct { - return "" - } - items := url.Values(map[string][]string{}) - for i := 0; i < value.NumField(); i++ { - field := value.Type().Field(i) - if field.PkgPath != "" { - continue - } - key := field.Tag.Get("qs") - if key == "" { - key = strings.ToLower(field.Name) - } else if key == "-" { - continue - } - addQueryStringValue(items, key, value.Field(i)) - } - return items.Encode() -} - -func addQueryStringValue(items url.Values, key string, v reflect.Value) { - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - items.Add(key, "1") - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if v.Int() > 0 { - items.Add(key, strconv.FormatInt(v.Int(), 10)) - } - case reflect.Float32, reflect.Float64: - if v.Float() > 0 { - items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) - } - case reflect.String: - if v.String() != "" { - 
items.Add(key, v.String()) - } - case reflect.Ptr: - if !v.IsNil() { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Map: - if len(v.MapKeys()) > 0 { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Array, reflect.Slice: - vLen := v.Len() - if vLen > 0 { - for i := 0; i < vLen; i++ { - addQueryStringValue(items, key, v.Index(i)) - } - } - } -} - -// Error represents failures in the API. It represents a failure from the API. -type Error struct { - Status int - Message string -} - -func newError(resp *http.Response) *Error { - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} - } - return &Error{Status: resp.StatusCode, Message: string(data)} -} - -func (e *Error) Error() string { - return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) -} - -func parseEndpoint(endpoint string, tls bool) (*url.URL, error) { - if endpoint != "" && !strings.Contains(endpoint, "://") { - endpoint = "tcp://" + endpoint - } - u, err := url.Parse(endpoint) - if err != nil { - return nil, ErrInvalidEndpoint - } - if tls { - u.Scheme = "https" - } - switch u.Scheme { - case "unix": - return u, nil - case "http", "https", "tcp": - _, port, err := net.SplitHostPort(u.Host) - if err != nil { - if e, ok := err.(*net.AddrError); ok { - if e.Err == "missing port in address" { - return u, nil - } - } - return nil, ErrInvalidEndpoint - } - number, err := strconv.ParseInt(port, 10, 64) - if err == nil && number > 0 && number < 65536 { - if u.Scheme == "tcp" { - if tls { - u.Scheme = "https" - } else { - u.Scheme = "http" - } - } - return u, nil - } - return nil, ErrInvalidEndpoint - default: - return nil, ErrInvalidEndpoint - } -} - -type dockerEnv struct { - dockerHost string - dockerTLSVerify bool - dockerCertPath string -} - -func getDockerEnv() (*dockerEnv, error) { - dockerHost := os.Getenv("DOCKER_HOST") - var err error - if dockerHost == "" { - dockerHost, err = DefaultDockerHost() - if err != nil { - return nil, err - } - } - dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != "" - var dockerCertPath string - if dockerTLSVerify { - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - if dockerCertPath == "" { - home := homedir.Get() - if home == "" { - return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set") - } - dockerCertPath = filepath.Join(home, ".docker") - dockerCertPath, err = filepath.Abs(dockerCertPath) - if err != nil { - return nil, err - } - } - } - return &dockerEnv{ - dockerHost: dockerHost, - dockerTLSVerify: dockerTLSVerify, - dockerCertPath: dockerCertPath, - }, nil -} - -// DefaultDockerHost returns the default docker socket for the current OS -func DefaultDockerHost() (string, error) { - var defaultHost string - if runtime.GOOS == "windows" { - // If we do not have a host, default to TCP socket on Windows - defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort) - } else { - // If we do not have a host, default to unix socket - defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket) - } - return opts.ValidateHost(defaultHost) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/client_test.go b/vendor/github.com/fsouza/go-dockerclient/client_test.go deleted file mode 100644 index 72135275..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/client_test.go +++ /dev/null 
@@ -1,518 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" -) - -func TestNewAPIClient(t *testing.T) { - endpoint := "http://localhost:4243" - client, err := NewClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - // test unix socket endpoints - endpoint = "unix:///var/run/docker.sock" - client, err = NewClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if !client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be true, got false") - } - if client.requestedAPIVersion != nil { - t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) - } -} - -func newTLSClient(endpoint string) (*Client, error) { - return NewTLSClient(endpoint, - "testing/data/cert.pem", - "testing/data/key.pem", - "testing/data/ca.pem") -} - -func TestNewTSLAPIClient(t *testing.T) { - endpoint := "https://localhost:4243" - client, err := newTLSClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if !client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be true, got false") - } - if client.requestedAPIVersion != nil { - t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) - } -} - -func TestNewVersionedClient(t *testing.T) { - endpoint := "http://localhost:4243" - client, err := NewVersionedClient(endpoint, "1.12") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewVersionedClientFromEnv(t *testing.T) { - endpoint := "tcp://localhost:2376" - endpointURL := "http://localhost:2376" - os.Setenv("DOCKER_HOST", endpoint) - os.Setenv("DOCKER_TLS_VERIFY", "") - client, err := NewVersionedClientFromEnv("1.12") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if client.endpointURL.String() != endpointURL { - t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { - t.Errorf("Wrong requestAPIVersion. Want %q. 
Got %q.", "1.12", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewVersionedClientFromEnvTLS(t *testing.T) { - endpoint := "tcp://localhost:2376" - endpointURL := "https://localhost:2376" - base, _ := os.Getwd() - os.Setenv("DOCKER_CERT_PATH", filepath.Join(base, "/testing/data/")) - os.Setenv("DOCKER_HOST", endpoint) - os.Setenv("DOCKER_TLS_VERIFY", "1") - client, err := NewVersionedClientFromEnv("1.12") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if client.endpointURL.String() != endpointURL { - t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewTLSVersionedClient(t *testing.T) { - certPath := "testing/data/cert.pem" - keyPath := "testing/data/key.pem" - caPath := "testing/data/ca.pem" - endpoint := "https://localhost:4243" - client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewTLSVersionedClientInvalidCA(t *testing.T) { - certPath := "testing/data/cert.pem" - keyPath := "testing/data/key.pem" - caPath := "testing/data/key.pem" - endpoint := "https://localhost:4243" - _, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err == nil { - t.Errorf("Expected invalid ca at %s", caPath) - } -} - -func TestNewClientInvalidEndpoint(t *testing.T) { - cases := []string{ - "htp://localhost:3243", "http://localhost:a", - "", "http://localhost:8080:8383", "http://localhost:65536", - "https://localhost:-20", - } - for _, c := range cases { - client, err := NewClient(c) - if client != nil { - t.Errorf("Want client for invalid endpoint, got %#v.", client) - } - if !reflect.DeepEqual(err, ErrInvalidEndpoint) { - t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err) - } - } -} - -func TestNewClientNoSchemeEndpoint(t *testing.T) { - cases := []string{"localhost", "localhost:8080"} - for _, c := range cases { - client, err := NewClient(c) - if client == nil { - t.Errorf("Want client for scheme-less endpoint, got ") - } - if err != nil { - t.Errorf("Got unexpected error scheme-less endpoint: %q", err) - } - } -} - -func TestNewTLSClient(t *testing.T) { - var tests = []struct { - endpoint string - expected string - }{ - {"tcp://localhost:2376", "https"}, - {"tcp://localhost:2375", "https"}, - {"tcp://localhost:4000", "https"}, - {"http://localhost:4000", "https"}, - } - for _, tt := range tests { - client, err := newTLSClient(tt.endpoint) - if err != nil { - t.Error(err) - } - got := client.endpointURL.Scheme - if got != tt.expected { - t.Errorf("endpointURL.Scheme: Got %s. 
Want %s.", got, tt.expected) - } - } -} - -func TestEndpoint(t *testing.T) { - client, err := NewVersionedClient("http://localhost:4243", "1.12") - if err != nil { - t.Fatal(err) - } - if endpoint := client.Endpoint(); endpoint != client.endpoint { - t.Errorf("Client.Endpoint(): want %q. Got %q", client.endpoint, endpoint) - } -} - -func TestGetURL(t *testing.T) { - var tests = []struct { - endpoint string - path string - expected string - }{ - {"http://localhost:4243/", "/", "http://localhost:4243/"}, - {"http://localhost:4243", "/", "http://localhost:4243/"}, - {"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, - {"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, - {"http://localhost:4243/////", "/", "http://localhost:4243/"}, - {"unix:///var/run/docker.socket", "/containers", "/containers"}, - } - for _, tt := range tests { - client, _ := NewClient(tt.endpoint) - client.endpoint = tt.endpoint - client.SkipServerVersionCheck = true - got := client.getURL(tt.path) - if got != tt.expected { - t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) - } - } -} - -func TestGetFakeUnixURL(t *testing.T) { - var tests = []struct { - endpoint string - path string - expected string - }{ - {"unix://var/run/docker.sock", "/", "http://unix.sock/"}, - {"unix://var/run/docker.socket", "/", "http://unix.sock/"}, - {"unix://var/run/docker.sock", "/containers/ps", "http://unix.sock/containers/ps"}, - } - for _, tt := range tests { - client, _ := NewClient(tt.endpoint) - client.endpoint = tt.endpoint - client.SkipServerVersionCheck = true - got := client.getFakeUnixURL(tt.path) - if got != tt.expected { - t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) - } - } -} - -func TestError(t *testing.T) { - fakeBody := ioutil.NopCloser(bytes.NewBufferString("bad parameter")) - resp := &http.Response{ - StatusCode: 400, - Body: fakeBody, - } - err := newError(resp) - expected := Error{Status: 400, Message: "bad parameter"} - if !reflect.DeepEqual(expected, *err) { - t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err) - } - message := "API error (400): bad parameter" - if err.Error() != message { - t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error()) - } -} - -func TestQueryString(t *testing.T) { - v := float32(2.4) - f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64)) - jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`) - var tests = []struct { - input interface{} - want string - }{ - {&ListContainersOptions{All: true}, "all=1"}, - {ListContainersOptions{All: true}, "all=1"}, - {ListContainersOptions{Before: "something"}, "before=something"}, - {ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"}, - {ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"}, - {dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"}, - {dumb{W: v, X: 10, Y: 10.35000}, f32QueryString}, - {dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"}, - {dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"}, - {dumb{T: 10, Y: 10.35000}, "y=10.35"}, - {dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson}, - {nil, ""}, - {10, ""}, - {"not_a_struct", ""}, - } - for _, tt := range tests { - got := queryString(tt.input) - if got != tt.want { - t.Errorf("queryString(%v). Want %q. 
Got %q.", tt.input, tt.want, got) - } - } -} - -func TestNewAPIVersionFailures(t *testing.T) { - var tests = []struct { - input string - expectedError string - }{ - {"1-0", `Unable to parse version "1-0"`}, - {"1.0-beta", `Unable to parse version "1.0-beta": "0-beta" is not an integer`}, - } - for _, tt := range tests { - v, err := NewAPIVersion(tt.input) - if v != nil { - t.Errorf("Expected version, got %v.", v) - } - if err.Error() != tt.expectedError { - t.Errorf("NewAPIVersion(%q): wrong error. Want %q. Got %q", tt.input, tt.expectedError, err.Error()) - } - } -} - -func TestAPIVersions(t *testing.T) { - var tests = []struct { - a string - b string - expectedALessThanB bool - expectedALessThanOrEqualToB bool - expectedAGreaterThanB bool - expectedAGreaterThanOrEqualToB bool - }{ - {"1.11", "1.11", false, true, false, true}, - {"1.10", "1.11", true, true, false, false}, - {"1.11", "1.10", false, false, true, true}, - - {"1.9", "1.11", true, true, false, false}, - {"1.11", "1.9", false, false, true, true}, - - {"1.1.1", "1.1", false, false, true, true}, - {"1.1", "1.1.1", true, true, false, false}, - - {"2.1", "1.1.1", false, false, true, true}, - {"2.1", "1.3.1", false, false, true, true}, - {"1.1.1", "2.1", true, true, false, false}, - {"1.3.1", "2.1", true, true, false, false}, - } - - for _, tt := range tests { - a, _ := NewAPIVersion(tt.a) - b, _ := NewAPIVersion(tt.b) - - if tt.expectedALessThanB && !a.LessThan(b) { - t.Errorf("Expected %#v < %#v", a, b) - } - if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) { - t.Errorf("Expected %#v <= %#v", a, b) - } - if tt.expectedAGreaterThanB && !a.GreaterThan(b) { - t.Errorf("Expected %#v > %#v", a, b) - } - if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) { - t.Errorf("Expected %#v >= %#v", a, b) - } - } -} - -func TestPing(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - err := client.Ping() - if err != nil { - t.Fatal(err) - } -} - -func TestPingFailing(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } - expectedErrMsg := "API error (500): " - if err.Error() != expectedErrMsg { - t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) - } -} - -func TestPingFailingWrongStatus(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted} - client := newTestClient(fakeRT) - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } - expectedErrMsg := "API error (202): " - if err.Error() != expectedErrMsg { - t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) - } -} - -func TestPingErrorWithUnixSocket(t *testing.T) { - go func() { - li, err := net.Listen("unix", "/tmp/echo.sock") - if err != nil { - t.Fatal(err) - } - defer li.Close() - if err != nil { - t.Fatalf("Expected to get listener, but failed: %#v", err) - } - - fd, err := li.Accept() - if err != nil { - t.Fatalf("Expected to accept connection, but failed: %#v", err) - } - - buf := make([]byte, 512) - nr, err := fd.Read(buf) - - // Create invalid response message to trigger error. 
- data := buf[0:nr] - for i := 0; i < 10; i++ { - data[i] = 63 - } - - _, err = fd.Write(data) - if err != nil { - t.Fatalf("Expected to write to socket, but failed: %#v", err) - } - - return - }() - - // Wait for unix socket to listen - time.Sleep(10 * time.Millisecond) - - endpoint := "unix:///tmp/echo.sock" - u, _ := parseEndpoint(endpoint, false) - client := Client{ - HTTPClient: cleanhttp.DefaultClient(), - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - } - - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } -} - -type FakeRoundTripper struct { - message string - status int - header map[string]string - requests []*http.Request -} - -func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - body := strings.NewReader(rt.message) - rt.requests = append(rt.requests, r) - res := &http.Response{ - StatusCode: rt.status, - Body: ioutil.NopCloser(body), - Header: make(http.Header), - } - for k, v := range rt.header { - res.Header.Set(k, v) - } - return res, nil -} - -func (rt *FakeRoundTripper) Reset() { - rt.requests = nil -} - -type person struct { - Name string - Age int `json:"age"` -} - -type dumb struct { - T int `qs:"-"` - v int - W float32 - X int - Y float64 - Z int `qs:"zee"` - Person *person `qs:"p"` -} - -type fakeEndpointURL struct { - Scheme string -} diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go deleted file mode 100644 index 1b3e17e3..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ /dev/null @@ -1,1159 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// ErrContainerAlreadyExists is the error returned by CreateContainer when the -// container already exists. -var ErrContainerAlreadyExists = errors.New("container already exists") - -// ListContainersOptions specify parameters to the ListContainers function. -// -// See https://goo.gl/47a6tO for more details. -type ListContainersOptions struct { - All bool - Size bool - Limit int - Since string - Before string - Filters map[string][]string -} - -// APIPort is a type that represents a port mapping returned by the Docker API -type APIPort struct { - PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"` - PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"` - Type string `json:"Type,omitempty" yaml:"Type,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty"` -} - -// APIContainers represents each container in the list returned by -// ListContainers. 
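For orientation while reading the deleted client code: the listing options defined above map one-to-one onto query parameters via queryString. A minimal usage sketch follows; the socket path and filter values are placeholders, not anything taken from this repository.

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// The endpoint is an assumption; any reachable Docker daemon works.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// All:true includes stopped containers; Filters serializes into the
	// "filters" query parameter handled by queryString.
	containers, err := client.ListContainers(docker.ListContainersOptions{
		All:     true,
		Filters: map[string][]string{"status": {"running"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image, c.Status)
	}
}
```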
-type APIContainers struct { - ID string `json:"Id" yaml:"Id"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Command string `json:"Command,omitempty" yaml:"Command,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - Status string `json:"Status,omitempty" yaml:"Status,omitempty"` - Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"` - SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"` - SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"` - Names []string `json:"Names,omitempty" yaml:"Names,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// ListContainers returns a slice of containers matching the given criteria. -// -// See https://goo.gl/47a6tO for more details. -func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { - path := "/containers/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var containers []APIContainers - if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil { - return nil, err - } - return containers, nil -} - -// Port represents the port number and the protocol, in the form -// <number>/<protocol>. For example: 80/tcp. -type Port string - -// Port returns the number of the port. -func (p Port) Port() string { - return strings.Split(string(p), "/")[0] -} - -// Proto returns the name of the protocol. -func (p Port) Proto() string { - parts := strings.Split(string(p), "/") - if len(parts) == 1 { - return "tcp" - } - return parts[1] -} - -// State represents the state of a container. -type State struct { - Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` - Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"` - Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"` - OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"` - Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` - Error string `json:"Error,omitempty" yaml:"Error,omitempty"` - StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"` - FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"` -} - -// String returns the string representation of a state. -func (s *State) String() string { - if s.Running { - if s.Paused { - return "paused" - } - return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt)) - } - return fmt.Sprintf("Exit %d", s.ExitCode) -} - -// PortBinding represents the host/container port mapping as returned in the -// `docker inspect` json -type PortBinding struct { - HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"` - HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"` -} - -// PortMapping represents a deprecated field in the `docker inspect` output, -// and its value as found in NetworkSettings should always be nil -type PortMapping map[string]string - -// ContainerNetwork represents the networking settings of a container per network.
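The Port helpers above split entries of the form <number>/<protocol>; a tiny self-contained sketch of the two accessors (the values are illustrative):

```go
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	p := docker.Port("80/tcp")
	fmt.Println(p.Port())  // "80"
	fmt.Println(p.Proto()) // "tcp"

	// When no protocol is given, Proto falls back to "tcp".
	fmt.Println(docker.Port("53").Proto()) // "tcp"
}
```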
-type ContainerNetwork struct { - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"` - GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"` - IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"` - IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"` - IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"` - Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"` - EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"` -} - -// NetworkSettings contains network-related information about a container -type NetworkSettings struct { - Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty"` - IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"` - IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"` - Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"` - PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"` - Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"` - NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"` - EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"` - SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"` - GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"` - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"` - IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"` - LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"` - LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"` - SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"` - SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"` -} - -// PortMappingAPI translates the port mappings as contained in NetworkSettings -// into the format in which they would appear when returned by the API -func (settings *NetworkSettings) PortMappingAPI() []APIPort { - var mapping []APIPort - for port, bindings := range settings.Ports { - p, _ := parsePort(port.Port()) - if len(bindings) == 0 { - mapping = append(mapping, APIPort{ - PrivatePort: int64(p), - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - p, _ := parsePort(port.Port()) - h, _ := parsePort(binding.HostPort) - mapping = append(mapping, APIPort{ - PrivatePort: int64(p), - PublicPort: int64(h), - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - return mapping -} - -func parsePort(rawPort string) (int, error) { - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// Config is the list of configuration options used when creating a container. -// Config does not contain the options that are specific to starting a container on a -// given host. 
Those are contained in HostConfig -type Config struct { - Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"` - KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` - ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` - StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` - Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` - Cmd []string `json:"Cmd" yaml:"Cmd"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` - VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` - VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` - SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Mount represents a mount point in the container. -// -// It has been added in the version 1.20 of the Docker API, available since -// Docker 1.8. -type Mount struct { - Source string - Destination string - Mode string - RW bool -} - -// LogConfig defines the log driver type and the configuration for it. -type LogConfig struct { - Type string `json:"Type,omitempty" yaml:"Type,omitempty"` - Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"` -} - -// ULimit defines system-wide resource limitations -// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users. 
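A hedged sketch of assembling the container-side Config defined above; the image name, command, and label are placeholders chosen for illustration:

```go
package main

import docker "github.com/fsouza/go-dockerclient"

// demoConfig builds a minimal Config for a one-shot command container.
// Only container-side options live here; host-side options belong in
// HostConfig, as the comment above explains.
func demoConfig() *docker.Config {
	return &docker.Config{
		Image:        "busybox",
		Cmd:          []string{"echo", "hello"},
		Env:          []string{"FOO=bar"},
		AttachStdout: true,
		AttachStderr: true,
		Labels:       map[string]string{"purpose": "demo"},
	}
}
```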
-type ULimit struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"` - Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"` -} - -// SwarmNode contains information about which Swarm node the container is on -type SwarmNode struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty"` - Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Container is the type encompassing everything about a container - its config, -// hostconfig, etc. -type Container struct { - ID string `json:"Id" yaml:"Id"` - - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` - - Path string `json:"Path,omitempty" yaml:"Path,omitempty"` - Args []string `json:"Args,omitempty" yaml:"Args,omitempty"` - - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` - State State `json:"State,omitempty" yaml:"State,omitempty"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - - Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"` - - NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"` - - SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"` - ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"` - HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"` - HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"` - LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` - - Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` - VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"` - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` - ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"` - - RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"` - - AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"` -} - -// RenameContainerOptions specify parameters to the RenameContainer function. -// -// See https://goo.gl/laSOIy for more details. -type RenameContainerOptions struct { - // ID of container to rename - ID string `qs:"-"` - - // New name - Name string `json:"name,omitempty" yaml:"name,omitempty"` -} - -// RenameContainer updates an existing container's name -// -// See https://goo.gl/laSOIy for more details. -func (c *Client) RenameContainer(opts RenameContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// InspectContainer returns information about a container by its ID. -// -// See https://goo.gl/RdIq0b for more details.
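As a usage sketch of RenameContainer above (the endpoint and container ID are placeholders): the ID is kept out of the query string by its qs:"-" tag and spliced into the URL path instead.

```go
package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// ID and Name are illustrative values only.
	err = client.RenameContainer(docker.RenameContainerOptions{
		ID:   "4fa6e0f0c678",
		Name: "web-old",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```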
-func (c *Client) InspectContainer(id string) (*Container, error) { - path := "/containers/" + id + "/json" - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var container Container - if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { - return nil, err - } - return &container, nil -} - -// ContainerChanges returns changes in the filesystem of the given container. -// -// See https://goo.gl/9GsTIF for more details. -func (c *Client) ContainerChanges(id string) ([]Change, error) { - path := "/containers/" + id + "/changes" - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var changes []Change - if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil { - return nil, err - } - return changes, nil -} - -// CreateContainerOptions specify parameters to the CreateContainer function. -// -// See https://goo.gl/WxQzrr for more details. -type CreateContainerOptions struct { - Name string - Config *Config `qs:"-"` - HostConfig *HostConfig `qs:"-"` -} - -// CreateContainer creates a new container, returning the container instance, -// or an error in case of failure. -// -// See https://goo.gl/WxQzrr for more details. -func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { - path := "/containers/create?" + queryString(opts) - resp, err := c.do( - "POST", - path, - doOptions{ - data: struct { - *Config - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` - }{ - opts.Config, - opts.HostConfig, - }, - }, - ) - - if e, ok := err.(*Error); ok { - if e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - if e.Status == http.StatusConflict { - return nil, ErrContainerAlreadyExists - } - } - - if err != nil { - return nil, err - } - defer resp.Body.Close() - var container Container - if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { - return nil, err - } - - container.Name = opts.Name - - return &container, nil -} - -// KeyValuePair is a type for generic key/value pairs as used in the Lxc -// configuration -type KeyValuePair struct { - Key string `json:"Key,omitempty" yaml:"Key,omitempty"` - Value string `json:"Value,omitempty" yaml:"Value,omitempty"` -} - -// RestartPolicy represents the policy for automatically restarting a container. -// -// Possible values are: -// -// - always: the docker daemon will always restart the container -// - on-failure: the docker daemon will restart the container on failures, at -// most MaximumRetryCount times -// - no: the docker daemon will not restart the container automatically -type RestartPolicy struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"` -} - -// AlwaysRestart returns a restart policy that tells the Docker daemon to -// always restart the container. -func AlwaysRestart() RestartPolicy { - return RestartPolicy{Name: "always"} -} - -// RestartOnFailure returns a restart policy that tells the Docker daemon to -// restart the container on failures, trying at most maxRetry times. 
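Tying CreateContainer to the restart-policy helpers defined here, a hedged sketch; the image, container name, and endpoint are placeholders:

```go
package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// HostConfig carries host-side options, including RestartPolicy.
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name:   "demo",
		Config: &docker.Config{Image: "busybox", Cmd: []string{"sleep", "60"}},
		HostConfig: &docker.HostConfig{
			RestartPolicy: docker.RestartOnFailure(3),
		},
	})
	if err != nil {
		// May be ErrNoSuchImage or ErrContainerAlreadyExists, per the
		// error handling in CreateContainer above.
		log.Fatal(err)
	}
	log.Println("created", container.ID)
}
```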
-func RestartOnFailure(maxRetry int) RestartPolicy { - return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry} -} - -// NeverRestart returns a restart policy that tells the Docker daemon to never -// restart the container on failures. -func NeverRestart() RestartPolicy { - return RestartPolicy{Name: "no"} -} - -// Device represents a device mapping between the Docker host and the -// container. -type Device struct { - PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"` - PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"` - CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"` -} - -// HostConfig contains the container options related to starting a container on -// a given host -type HostConfig struct { - Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"` - CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"` - CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"` - GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"` - ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"` - LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"` - Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` - PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"` - Links []string `json:"Links,omitempty" yaml:"Links,omitempty"` - PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only - DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty"` - DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"` - ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"` - VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"` - IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"` - PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"` - UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"` - RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"` - Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"` - LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"` - ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"` - SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` - CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"` - OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"` - CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"` - CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"` - CPUPeriod int64 `json:"CpuPeriod,omitempty" 
yaml:"CpuPeriod,omitempty"` - BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"` - Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` - VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` -} - -// StartContainer starts a container, returning an error in case of failure. -// -// See https://goo.gl/MrBAJv for more details. -func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { - path := "/containers/" + id + "/start" - resp, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id, Err: err} - } - return err - } - if resp.StatusCode == http.StatusNotModified { - return &ContainerAlreadyRunning{ID: id} - } - resp.Body.Close() - return nil -} - -// StopContainer stops a container, killing it after the given timeout (in -// seconds). -// -// See https://goo.gl/USqsFt for more details. -func (c *Client) StopContainer(id string, timeout uint) error { - path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - if resp.StatusCode == http.StatusNotModified { - return &ContainerNotRunning{ID: id} - } - resp.Body.Close() - return nil -} - -// RestartContainer stops a container, killing it after the given timeout (in -// seconds), during the stop process. -// -// See https://goo.gl/QzsDnz for more details. -func (c *Client) RestartContainer(id string, timeout uint) error { - path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// PauseContainer pauses the given container. -// -// See https://goo.gl/OF7W9X for more details. -func (c *Client) PauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/pause", id) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// UnpauseContainer unpauses the given container. -// -// See https://goo.gl/7dwyPA for more details. -func (c *Client) UnpauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/unpause", id) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// TopResult represents the list of processes running in a container, as -// returned by /containers//top. -// -// See https://goo.gl/Rb46aY for more details. -type TopResult struct { - Titles []string - Processes [][]string -} - -// TopContainer returns processes running inside a container -// -// See https://goo.gl/Rb46aY for more details. 
-func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { - var args string - var result TopResult - if psArgs != "" { - args = fmt.Sprintf("?ps_args=%s", psArgs) - } - path := fmt.Sprintf("/containers/%s/top%s", id, args) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return result, &NoSuchContainer{ID: id} - } - return result, err - } - defer resp.Body.Close() - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { - return result, err - } - return result, nil -} - -// Stats represents container statistics, returned by /containers/<id>/stats. -// -// See https://goo.gl/GNmLHb for more details. -type Stats struct { - Read time.Time `json:"read,omitempty" yaml:"read,omitempty"` - Network struct { - RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"` - RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"` - RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"` - TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"` - TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"` - RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"` - TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"` - TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"` - } `json:"network,omitempty" yaml:"network,omitempty"` - MemoryStats struct { - Stats struct { - TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"` - Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"` - MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"` - TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"` - Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"` - Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"` - TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"` - Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"` - Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"` - Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"` - TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"` - Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"` - TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"` - TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"` - TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"` - TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"` - RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"` - HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"` - TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"` - TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"` - ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"` - TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"` - TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"` - TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"` -
InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"` - ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"` - Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"` - InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"` - TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"` - } `json:"stats,omitempty" yaml:"stats,omitempty"` - MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"` - Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"` - Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"` - } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"` - BlkioStats struct { - IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"` - IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"` - IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"` - IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"` - IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"` - IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"` - IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"` - SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"` - } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` -} - -// CPUStats is a stats entry for cpu stats -type CPUStats struct { - CPUUsage struct { - PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"` - UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"` - TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"` - UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"` - } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"` - SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"` - ThrottlingData struct { - Periods uint64 `json:"periods,omitempty"` - ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` - ThrottledTime uint64 `json:"throttled_time,omitempty"` - } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"` -} - -// BlkioStatsEntry is a stats entry for blkio_stats -type BlkioStatsEntry struct { - Major uint64 `json:"major,omitempty" yaml:"major,omitempty"` - Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"` - Op string `json:"op,omitempty" yaml:"op,omitempty"` - Value uint64 `json:"value,omitempty" yaml:"value,omitempty"` -} - -// StatsOptions specify parameters to the Stats function. -// -// See https://goo.gl/GNmLHb for more details. 
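The paired CPUStats/PreCPUStats samples above support the usual delta calculation for a CPU percentage. A sketch, assuming the conventional docker-stats arithmetic rather than anything this file itself defines:

```go
package main

import docker "github.com/fsouza/go-dockerclient"

// cpuPercent derives a utilisation percentage from two consecutive
// samples; PreCPUStats holds the previous sample. The formula (container
// delta over system delta, scaled by the CPU count) is an assumption
// borrowed from common docker stats consumers.
func cpuPercent(s *docker.Stats) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
	if sysDelta <= 0 || cpuDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * float64(len(s.CPUStats.CPUUsage.PercpuUsage)) * 100
}
```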
-type StatsOptions struct { - ID string - Stats chan<- *Stats - Stream bool - // A flag that enables stopping the stats operation - Done <-chan bool - // Initial connection timeout - Timeout time.Duration -} - -// Stats sends container statistics for the given container to the given channel. -// -// This function is blocking, similar to a streaming call for logs, and should be run -// on a separate goroutine from the caller. Note that this function will block until -// the given container is removed, not just exited. When finished, this function -// will close the given channel. Alternatively, function can be stopped by -// signaling on the Done channel. -// -// See https://goo.gl/GNmLHb for more details. -func (c *Client) Stats(opts StatsOptions) (retErr error) { - errC := make(chan error, 1) - readCloser, writeCloser := io.Pipe() - - defer func() { - close(opts.Stats) - - select { - case err := <-errC: - if err != nil && retErr == nil { - retErr = err - } - default: - // No errors - } - - if err := readCloser.Close(); err != nil && retErr == nil { - retErr = err - } - }() - - go func() { - err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ - rawJSONStream: true, - useJSONDecoder: true, - stdout: writeCloser, - timeout: opts.Timeout, - }) - if err != nil { - dockerError, ok := err.(*Error) - if ok { - if dockerError.Status == http.StatusNotFound { - err = &NoSuchContainer{ID: opts.ID} - } - } - } - if closeErr := writeCloser.Close(); closeErr != nil && err == nil { - err = closeErr - } - errC <- err - close(errC) - }() - - quit := make(chan struct{}) - defer close(quit) - go func() { - // block here waiting for the signal to stop function - select { - case <-opts.Done: - readCloser.Close() - case <-quit: - return - } - }() - - decoder := json.NewDecoder(readCloser) - stats := new(Stats) - for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) { - if err != nil { - return err - } - opts.Stats <- stats - stats = new(Stats) - } - return nil -} - -// KillContainerOptions represents the set of options that can be used in a -// call to KillContainer. -// -// See https://goo.gl/hkS9i8 for more details. -type KillContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // The signal to send to the container. When omitted, Docker server - // will assume SIGKILL. - Signal Signal -} - -// KillContainer sends a signal to a container, returning an error in case of -// failure. -// -// See https://goo.gl/hkS9i8 for more details. -func (c *Client) KillContainer(opts KillContainerOptions) error { - path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} - -// RemoveContainerOptions encapsulates options to remove a container. -// -// See https://goo.gl/RQyX62 for more details. -type RemoveContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // A flag that indicates whether Docker should remove the volumes - // associated to the container. - RemoveVolumes bool `qs:"v"` - - // A flag that indicates whether Docker should remove the container - // even if it is currently running. - Force bool -} - -// RemoveContainer removes a container, returning an error in case of failure. -// -// See https://goo.gl/RQyX62 for more details. 
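A sketch of driving the streaming Stats call above; the container ID is a placeholder. Running the producer on its own goroutine matches the contract in the comment: Stats blocks, closes the Stats channel when finished, and can be interrupted through Done.

```go
package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	statsC := make(chan *docker.Stats)
	done := make(chan bool)

	go func() {
		err := client.Stats(docker.StatsOptions{
			ID:     "4fa6e0f0c678", // placeholder ID
			Stats:  statsC,
			Stream: true,
			Done:   done,
		})
		if err != nil {
			log.Println("stats stream ended:", err)
		}
	}()

	n := 0
	for s := range statsC {
		log.Println("memory usage:", s.MemoryStats.Usage)
		if n++; n == 3 {
			close(done) // signalling Done stops the stream
		}
	}
}
```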
-func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { - path := "/containers/" + opts.ID + "?" + queryString(opts) - resp, err := c.do("DELETE", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} - -// UploadToContainerOptions is the set of options that can be used when -// uploading an archive into a container. -// -// See https://goo.gl/Ss97HW for more details. -type UploadToContainerOptions struct { - InputStream io.Reader `json:"-" qs:"-"` - Path string `qs:"path"` - NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"` -} - -// UploadToContainer uploads a tar archive to be extracted to a path in the -// filesystem of the container. -// -// See https://goo.gl/Ss97HW for more details. -func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("PUT", url, streamOptions{ - in: opts.InputStream, - }) -} - -// DownloadFromContainerOptions is the set of options that can be used when -// downloading resources from a container. -// -// See https://goo.gl/KnZJDX for more details. -type DownloadFromContainerOptions struct { - OutputStream io.Writer `json:"-" qs:"-"` - Path string `qs:"path"` -} - -// DownloadFromContainer downloads a tar archive of files or folders in a container. -// -// See https://goo.gl/KnZJDX for more details. -func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("GET", url, streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// CopyFromContainerOptions has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/R2jevW for more details. -type CopyFromContainerOptions struct { - OutputStream io.Writer `json:"-"` - Container string `json:"-"` - Resource string -} - -// CopyFromContainer has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/R2jevW for more details. -func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - url := fmt.Sprintf("/containers/%s/copy", opts.Container) - resp, err := c.do("POST", url, doOptions{data: opts}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.Container} - } - return err - } - defer resp.Body.Close() - _, err = io.Copy(opts.OutputStream, resp.Body) - return err -} - -// WaitContainer blocks until the given container stops, return the exit code -// of the container status. -// -// See https://goo.gl/Gc1rge for more details. -func (c *Client) WaitContainer(id string) (int, error) { - resp, err := c.do("POST", "/containers/"+id+"/wait", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return 0, &NoSuchContainer{ID: id} - } - return 0, err - } - defer resp.Body.Close() - var r struct{ StatusCode int } - if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { - return 0, err - } - return r.StatusCode, nil -} - -// CommitContainerOptions aggregates parameters to the CommitContainer method. -// -// See https://goo.gl/mqfoCw for more details. 
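DownloadFromContainer above yields a tar stream; a minimal sketch that writes it to disk (paths and ID are placeholders):

```go
package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	out, err := os.Create("etc.tar") // illustrative output file
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	// Path names a directory inside the container; the response body is
	// streamed straight into OutputStream.
	err = client.DownloadFromContainer("4fa6e0f0c678", docker.DownloadFromContainerOptions{
		OutputStream: out,
		Path:         "/etc",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```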
-type CommitContainerOptions struct { - Container string - Repository string `qs:"repo"` - Tag string - Message string `qs:"m"` - Author string - Run *Config `qs:"-"` -} - -// CommitContainer creates a new image from a container's changes. -// -// See https://goo.gl/mqfoCw for more details. -func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { - path := "/commit?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{data: opts.Run}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - return nil, err - } - defer resp.Body.Close() - var image Image - if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { - return nil, err - } - return &image, nil -} - -// AttachToContainerOptions is the set of options that can be used when -// attaching to a container. -// -// See https://goo.gl/NKpkFk for more details. -type AttachToContainerOptions struct { - Container string `qs:"-"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - // Get container logs, sending it to OutputStream. - Logs bool - - // Stream the response? - Stream bool - - // Attach to stdin, and use InputStream. - Stdin bool - - // Attach to stdout, and use OutputStream. - Stdout bool - - // Attach to stderr, and use ErrorStream. - Stderr bool - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` -} - -// AttachToContainer attaches to a container, using the given options. -// -// See https://goo.gl/NKpkFk for more details. -func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - path := "/containers/" + opts.Container + "/attach?" + queryString(opts) - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - }) -} - -// LogsOptions represents the set of options used when getting logs from a -// container. -// -// See https://goo.gl/yl8PGm for more details. -type LogsOptions struct { - Container string `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - Follow bool - Stdout bool - Stderr bool - Since int64 - Timestamps bool - Tail string - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` -} - -// Logs gets stdout and stderr logs from the specified container. -// -// See https://goo.gl/yl8PGm for more details. -func (c *Client) Logs(opts LogsOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - if opts.Tail == "" { - opts.Tail = "all" - } - path := "/containers/" + opts.Container + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ - setRawTerminal: opts.RawTerminal, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - }) -} - -// ResizeContainerTTY resizes the terminal to the given height and width. -// -// See https://goo.gl/xERhCc for more details. 
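A usage sketch of the Logs call above, streaming a container's output to the terminal (endpoint and container ID are placeholders; Tail defaults to "all" when left empty):

```go
package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	err = client.Logs(docker.LogsOptions{
		Container:    "4fa6e0f0c678",
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		Stdout:       true,
		Stderr:       true,
		Timestamps:   true,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```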
-func (c *Client) ResizeContainerTTY(id string, height, width int) error { - params := make(url.Values) - params.Set("h", strconv.Itoa(height)) - params.Set("w", strconv.Itoa(width)) - resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ExportContainerOptions is the set of parameters to the ExportContainer -// method. -// -// See https://goo.gl/dOkTyk for more details. -type ExportContainerOptions struct { - ID string - OutputStream io.Writer -} - -// ExportContainer export the contents of container id as tar archive -// and prints the exported contents to stdout. -// -// See https://goo.gl/dOkTyk for more details. -func (c *Client) ExportContainer(opts ExportContainerOptions) error { - if opts.ID == "" { - return &NoSuchContainer{ID: opts.ID} - } - url := fmt.Sprintf("/containers/%s/export", opts.ID) - return c.stream("GET", url, streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// NoSuchContainer is the error returned when a given container does not exist. -type NoSuchContainer struct { - ID string - Err error -} - -func (err *NoSuchContainer) Error() string { - if err.Err != nil { - return err.Err.Error() - } - return "No such container: " + err.ID -} - -// ContainerAlreadyRunning is the error returned when a given container is -// already running. -type ContainerAlreadyRunning struct { - ID string -} - -func (err *ContainerAlreadyRunning) Error() string { - return "Container already running: " + err.ID -} - -// ContainerNotRunning is the error returned when a given container is not -// running. -type ContainerNotRunning struct { - ID string -} - -func (err *ContainerNotRunning) Error() string { - return "Container not running: " + err.ID -} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_test.go b/vendor/github.com/fsouza/go-dockerclient/container_test.go deleted file mode 100644 index 2f93b630..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/container_test.go +++ /dev/null @@ -1,2244 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "testing" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" -) - -func TestStateString(t *testing.T) { - started := time.Now().Add(-3 * time.Hour) - var tests = []struct { - input State - expected string - }{ - {State{Running: true, Paused: true}, "^paused$"}, - {State{Running: true, StartedAt: started}, "^Up 3h.*$"}, - {State{Running: false, ExitCode: 7}, "^Exit 7$"}, - } - for _, tt := range tests { - re := regexp.MustCompile(tt.expected) - if got := tt.input.String(); !re.MatchString(got) { - t.Errorf("State.String(): wrong result. Want %q. 
Got %q.", tt.expected, got) - } - } -} - -func TestListContainers(t *testing.T) { - jsonContainers := `[ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}], - "Created": 1367854152, - "Status": "Exit 0" - } -]` - var expected []APIContainers - err := json.Unmarshal([]byte(jsonContainers), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK}) - containers, err := client.ListContainers(ListContainersOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(containers, expected) { - t.Errorf("ListContainers: Expected %#v. Got %#v.", expected, containers) - } -} - -func TestListContainersParams(t *testing.T) { - var tests = []struct { - input ListContainersOptions - params map[string][]string - }{ - {ListContainersOptions{}, map[string][]string{}}, - {ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}}, - {ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}}, - { - ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"}, - map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}}, - }, - { - ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, - map[string][]string{"filters": {"{\"status\":[\"paused\",\"running\"]}"}}, - }, - { - ListContainersOptions{All: true, Filters: map[string][]string{"exited": {"0"}, "status": {"exited"}}}, - map[string][]string{"all": {"1"}, "filters": {"{\"exited\":[\"0\"],\"status\":[\"exited\"]}"}}, - }, - } - fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK} - client := newTestClient(fakeRT) - u, _ := url.Parse(client.getURL("/containers/json")) - for _, tt := range tests { - if _, err := client.ListContainers(tt.input); err != nil { - t.Error(err) - } - got := map[string][]string(fakeRT.requests[0].URL.Query()) - if !reflect.DeepEqual(got, tt.params) { - t.Errorf("Expected %#v, got %#v.", tt.params, got) - } - if path := fakeRT.requests[0].URL.Path; path != u.Path { - t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) - } - if meth := fakeRT.requests[0].Method; meth != "GET" { - t.Errorf("Wrong HTTP method. Want GET. 
Got %s.", meth) - } - fakeRT.Reset() - } -} - -func TestListContainersFailure(t *testing.T) { - var tests = []struct { - status int - message string - }{ - {400, "bad parameter"}, - {500, "internal server error"}, - } - for _, tt := range tests { - client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status}) - expected := Error{Status: tt.status, Message: tt.message} - containers, err := client.ListContainers(ListContainersOptions{}) - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err) - } - if len(containers) > 0 { - t.Errorf("ListContainers failure. Expected empty list. Got %#v.", containers) - } - } -} - -func TestInspectContainer(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "AppArmorProfile": "Profile", - "Created": "2013-05-07T14:51:42.087658+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 17179869184, - "MemorySwap": 34359738368, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "SecurityOpt": [ - "label:user:USER" - ], - "Ulimits": [ - { "Name": "nofile", "Soft": 1024, "Hard": 2048 } - ] - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:00", - "Ghost": false - }, - "Node": { - "ID": "4I4E:QR4I:Z733:QEZK:5X44:Q4T7:W2DD:JRDY:KB2O:PODO:Z5SR:XRB6", - "IP": "192.168.99.105", - "Addra": "192.168.99.105:2376", - "Name": "node-01", - "Cpus": 4, - "Memory": 1048436736, - "Labels": { - "executiondriver": "native-0.2", - "kernelversion": "3.18.5-tinycore64", - "operatingsystem": "Boot2Docker 1.5.0 (TCL 5.4); master : a66bce5 - Tue Feb 10 23:31:27 UTC 2015", - "provider": "virtualbox", - "storagedriver": "aufs" - } - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false, - "CgroupParent": "/mesos", - "Memory": 17179869184, - "MemorySwap": 34359738368, - "GroupAdd": ["fake", "12345"] - } -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*container, expected) { - t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestInspectContainerNetwork(t *testing.T) { - jsonContainer := `{ - "Id": "81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c", - "Created": "2015-11-12T14:54:04.791485659Z", - "Path": "consul-template", - "Args": [ - "-config=/tmp/haproxy.json", - "-consul=192.168.99.120:8500" - ], - "State": { - "Status": "running", - "Running": true, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Dead": false, - "Pid": 3196, - "ExitCode": 0, - "Error": "", - "StartedAt": "2015-11-12T14:54:05.026747471Z", - "FinishedAt": "0001-01-01T00:00:00Z" - }, - "Image": "4921c5917fc117df3dec32f4c1976635dc6c56ccd3336fe1db3477f950e78bf7", - "ResolvConfPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/resolv.conf", - "HostnamePath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/hostname", - "HostsPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/hosts", - "LogPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c-json.log", - "Node": { - "ID": "AUIB:LFOT:3LSF:SCFS:OYDQ:NLXD:JZNE:4INI:3DRC:ZFBB:GWCY:DWJK", - "IP": "192.168.99.121", - "Addr": "192.168.99.121:2376", - "Name": "swl-demo1", - "Cpus": 1, - "Memory": 2099945472, - "Labels": { - "executiondriver": "native-0.2", - "kernelversion": "4.1.12-boot2docker", - "operatingsystem": "Boot2Docker 1.9.0 (TCL 6.4); master : 16e4a2a - Tue Nov 3 19:49:22 UTC 2015", - "provider": "virtualbox", - "storagedriver": "aufs" - } - }, - "Name": "/docker-proxy.swl-demo1", - "RestartCount": 0, - "Driver": "aufs", - "ExecDriver": "native-0.2", - "MountLabel": "", - "ProcessLabel": "", - "AppArmorProfile": "", - "ExecIDs": null, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Memory": 0, - "MemoryReservation": 0, - "MemorySwap": 0, - "KernelMemory": 0, - "CpuShares": 0, - "CpuPeriod": 0, - "CpusetCpus": "", - "CpusetMems": "", - "CpuQuota": 0, - "BlkioWeight": 0, - "OomKillDisable": false, - "MemorySwappiness": -1, - "Privileged": false, - "PortBindings": { - "443/tcp": [ - { - "HostIp": "", - "HostPort": "443" - } - ] - }, - "Links": null, - "PublishAllPorts": false, - "Dns": null, - "DnsOptions": null, - "DnsSearch": null, - "ExtraHosts": null, - "VolumesFrom": null, - "Devices": [], - "NetworkMode": "swl-net", - "IpcMode": "", - "PidMode": "", - "UTSMode": "", - "CapAdd": null, - "CapDrop": null, - "GroupAdd": null, - "RestartPolicy": { - "Name": "no", - "MaximumRetryCount": 0 - }, - "SecurityOpt": null, - "ReadonlyRootfs": false, - "Ulimits": null, - "LogConfig": { - "Type": "json-file", - "Config": {} - }, - "CgroupParent": "", - "ConsoleSize": [ - 0, - 0 - ], - "VolumeDriver": "" - }, - "GraphDriver": { - "Name": "aufs", - "Data": null - }, - "Mounts": [], - "Config": { - "Hostname": "81e1bbe20b55", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": { - "443/tcp": {} - }, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": [ - "DOMAIN=local.auto", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "CONSUL_TEMPLATE_VERSION=0.11.1" - ], - "Cmd": [ - "-consul=192.168.99.120:8500" - ], - "Image": "docker-proxy:latest", - "Volumes": null, - "WorkingDir": "", - 
"Entrypoint": [ - "consul-template", - "-config=/tmp/haproxy.json" - ], - "OnBuild": null, - "Labels": {}, - "StopSignal": "SIGTERM" - }, - "NetworkSettings": { - "Bridge": "", - "SandboxID": "c6b903dc5c1a96113a22dbc44709e30194079bd2d262eea1eb4f38d85821f6e1", - "HairpinMode": false, - "LinkLocalIPv6Address": "", - "LinkLocalIPv6PrefixLen": 0, - "Ports": { - "443/tcp": [ - { - "HostIp": "192.168.99.121", - "HostPort": "443" - } - ] - }, - "SandboxKey": "/var/run/docker/netns/c6b903dc5c1a", - "SecondaryIPAddresses": null, - "SecondaryIPv6Addresses": null, - "EndpointID": "", - "Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "IPAddress": "", - "IPPrefixLen": 0, - "IPv6Gateway": "", - "MacAddress": "", - "Networks": { - "swl-net": { - "EndpointID": "683e3092275782a53c3b0968cc7e3a10f23264022ded9cb20490902f96fc5981", - "Gateway": "", - "IPAddress": "10.0.0.3", - "IPPrefixLen": 24, - "IPv6Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "MacAddress": "02:42:0a:00:00:03" - } - } - } -}` - - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "81e1bbe20b55" - exp := "10.0.0.3" - - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - - s := reflect.Indirect(reflect.ValueOf(container.NetworkSettings)) - networks := s.FieldByName("Networks") - if networks.IsValid() { - var ip string - for _, net := range networks.MapKeys() { - if net.Interface().(string) == container.HostConfig.NetworkMode { - ip = networks.MapIndex(net).FieldByName("IPAddress").Interface().(string) - t.Logf("%s %v", net, ip) - } - } - if ip != exp { - t.Errorf("InspectContainerNetworks(%q): Expected %#v. Got %#v.", id, exp, ip) - } - } else { - t.Errorf("InspectContainerNetworks(%q): No method Networks for NetworkSettings", id) - } - -} - -func TestInspectContainerNegativeSwap(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.087658+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 17179869184, - "MemorySwap": -1, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "VolumesFrom": "" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:00", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*container, 
expected) { - t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestInspectContainerFailure(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) - expected := Error{Status: 500, Message: "server error"} - container, err := client.InspectContainer("abe033") - if container != nil { - t.Errorf("InspectContainer: Expected container, got %#v", container) - } - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestInspectContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) - container, err := client.InspectContainer("abe033") - if container != nil { - t.Errorf("InspectContainer: Expected container, got %#v", container) - } - expected := &NoSuchContainer{ID: "abe033"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestContainerChanges(t *testing.T) { - jsonChanges := `[ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } -]` - var expected []Change - err := json.Unmarshal([]byte(jsonChanges), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - changes, err := client.ContainerChanges(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(changes, expected) { - t.Errorf("ContainerChanges(%q): Expected %#v. Got %#v.", id, expected, changes) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestContainerChangesFailure(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) - expected := Error{Status: 500, Message: "server error"} - changes, err := client.ContainerChanges("abe033") - if changes != nil { - t.Errorf("ContainerChanges: Expected changes, got %#v", changes) - } - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestContainerChangesNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) - changes, err := client.ContainerChanges("abe033") - if changes != nil { - t.Errorf("ContainerChanges: Expected changes, got %#v", changes) - } - expected := &NoSuchContainer{ID: "abe033"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("ContainerChanges: Wrong error information. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestCreateContainer(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Warnings": [] -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{AttachStdout: true, AttachStdin: true} - opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} - container, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if container.ID != id { - t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/create")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("CreateContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody Config - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestCreateContainerImageNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound}) - config := Config{AttachStdout: true, AttachStdin: true} - container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) - if container != nil { - t.Errorf("CreateContainer: expected container, got %#v.", container) - } - if !reflect.DeepEqual(err, ErrNoSuchImage) { - t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestCreateContainerDuplicateName(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusConflict}) - config := Config{AttachStdout: true, AttachStdin: true} - container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) - if container != nil { - t.Errorf("CreateContainer: expected container, got %#v.", container) - } - if err != ErrContainerAlreadyExists { - t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrContainerAlreadyExists, err) - } -} - -func TestCreateContainerWithHostConfig(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{} - hostConfig := HostConfig{PublishAllPorts: true} - opts := CreateContainerOptions{Name: "TestCreateContainerWithHostConfig", Config: &config, HostConfig: &hostConfig} - _, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - var gotBody map[string]interface{} - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - if _, ok := gotBody["HostConfig"]; !ok { - t.Errorf("CreateContainer: wrong body. HostConfig was not serialized") - } -} - -func TestStartContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StartContainer(id, &HostConfig{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) - } -} - -func TestStartContainerNilHostConfig(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StartContainer(id, nil) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) - } - var buf [4]byte - req.Body.Read(buf[:]) - if string(buf[:]) != "null" { - t.Errorf("Startcontainer(%q): Wrong body. Want null. Got %s", id, buf[:]) - } -} - -func TestStartContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.StartContainer("a2344", &HostConfig{}) - expected := &NoSuchContainer{ID: "a2344", Err: err.(*NoSuchContainer).Err} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStartContainerAlreadyRunning(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "container already running", status: http.StatusNotModified}) - err := client.StartContainer("a2334", &HostConfig{}) - expected := &ContainerAlreadyRunning{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStopContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StopContainer(id, 10) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestStopContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.StopContainer("a2334", 10) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StopContainer: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestStopContainerNotRunning(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "container not running", status: http.StatusNotModified}) - err := client.StopContainer("a2334", 10) - expected := &ContainerNotRunning{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRestartContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.RestartContainer(id, 10) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRestartContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.RestartContainer("a2334", 10) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RestartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestPauseContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.PauseContainer(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/pause")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestPauseContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.PauseContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestUnpauseContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.UnpauseContainer(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/unpause")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestUnpauseContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.UnpauseContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestKillContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.KillContainer(KillContainerOptions{ID: id}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("KillContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestKillContainerSignal(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - if signal := req.URL.Query().Get("signal"); signal != "15" { - t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal) - } -} - -func TestKillContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.KillContainer(KillContainerOptions{ID: "a2334"}) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRemoveContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := RemoveContainerOptions{ID: id} - err := client.RemoveContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "DELETE" { - t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id)) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRemoveContainerRemoveVolumes(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := RemoveContainerOptions{ID: id, RemoveVolumes: true} - err := client.RemoveContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - params := map[string][]string(req.URL.Query()) - expected := map[string][]string{"v": {"1"}} - if !reflect.DeepEqual(params, expected) { - t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. 
Got %#v.", id, expected, params) - } -} - -func TestRemoveContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"}) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestResizeContainerTTY(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.ResizeContainerTTY(id, 40, 80) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - got := map[string][]string(req.URL.Query()) - expectedParams := map[string][]string{ - "w": {"80"}, - "h": {"40"}, - } - if !reflect.DeepEqual(got, expectedParams) { - t.Errorf("Expected %#v, got %#v.", expectedParams, got) - } -} - -func TestWaitContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - status, err := client.WaitContainer(id) - if err != nil { - t.Fatal(err) - } - if status != 56 { - t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestWaitContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.WaitContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestCommitContainer(t *testing.T) { - response := `{"Id":"596069db4bf5"}` - client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK}) - id := "596069db4bf5" - image, err := client.CommitContainer(CommitContainerOptions{}) - if err != nil { - t.Fatal(err) - } - if image.ID != id { - t.Errorf("CommitContainer: Wrong image id. Want %q. 
Got %q.", id, image.ID) - } -} - -func TestCommitContainerParams(t *testing.T) { - cfg := Config{Memory: 67108864} - json, _ := json.Marshal(&cfg) - var tests = []struct { - input CommitContainerOptions - params map[string][]string - body []byte - }{ - {CommitContainerOptions{}, map[string][]string{}, nil}, - {CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil}, - { - CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"}, - map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "m": {"something"}}, - nil, - }, - { - CommitContainerOptions{Container: "44c004db4b17", Run: &cfg}, - map[string][]string{"container": {"44c004db4b17"}}, - json, - }, - } - fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} - client := newTestClient(fakeRT) - u, _ := url.Parse(client.getURL("/commit")) - for _, tt := range tests { - if _, err := client.CommitContainer(tt.input); err != nil { - t.Error(err) - } - got := map[string][]string(fakeRT.requests[0].URL.Query()) - if !reflect.DeepEqual(got, tt.params) { - t.Errorf("Expected %#v, got %#v.", tt.params, got) - } - if path := fakeRT.requests[0].URL.Path; path != u.Path { - t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) - } - if meth := fakeRT.requests[0].Method; meth != "POST" { - t.Errorf("Wrong HTTP method. Want POST. Got %s.", meth) - } - if tt.body != nil { - if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil { - if bytes.Compare(requestBody, tt.body) != 0 { - t.Errorf("Expected body %#v, got %#v", tt.body, requestBody) - } - } else { - t.Errorf("Error reading request body: %#v", err) - } - } - fakeRT.Reset() - } -} - -func TestCommitContainerFailure(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError}) - _, err := client.CommitContainer(CommitContainerOptions{}) - if err == nil { - t.Error("Expected non-nil error, got .") - } -} - -func TestCommitContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.CommitContainer(CommitContainerOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestAttachToContainerLogs(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19}) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &buf, - Stdout: true, - Stderr: true, - Logs: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "POST" { - t.Errorf("AttachToContainer: wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/attach")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "logs": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestAttachToContainer(t *testing.T) { - var reader = strings.NewReader("send value") - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } - expected := map[string][]string{ - "stdin": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "stream": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestAttachToContainerSentinel(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - success := make(chan struct{}) - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - Success: success, - } - go func() { - if err := client.AttachToContainer(opts); err != nil { - t.Error(err) - } - }() - success <- <-success -} - -func TestAttachToContainerNilStdout(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: nil, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestAttachToContainerNilStderr(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestAttachToContainerStdinOnly(t 
*testing.T) { - var reader = strings.NewReader("send value") - serverFinished := make(chan struct{}) - clientFinished := make(chan struct{}) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - hj, ok := w.(http.Hijacker) - if !ok { - t.Fatal("cannot hijack server connection") - } - conn, _, err := hj.Hijack() - if err != nil { - t.Fatal(err) - } - // wait for client to indicate it's finished - <-clientFinished - // inform test that the server has finished - close(serverFinished) - conn.Close() - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - success := make(chan struct{}) - opts := AttachToContainerOptions{ - Container: "a123456", - InputStream: reader, - Stdin: true, - Stdout: false, - Stderr: false, - Stream: true, - RawTerminal: false, - Success: success, - } - go func() { - if err := client.AttachToContainer(opts); err != nil { - t.Error(err) - } - // client's attach session is over - close(clientFinished) - }() - success <- <-success - // wait for server to finish handling attach - <-serverFinished -} - -func TestAttachToContainerRawTerminalFalse(t *testing.T) { - input := strings.NewReader("send value") - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req = *r - w.WriteHeader(http.StatusOK) - hj, ok := w.(http.Hijacker) - if !ok { - t.Fatal("cannot hijack server connection") - } - conn, _, err := hj.Hijack() - if err != nil { - t.Fatal(err) - } - conn.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - conn.Write([]byte("hello")) - conn.Write([]byte{2, 0, 0, 0, 0, 0, 0, 6}) - conn.Write([]byte("hello!")) - conn.Close() - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: input, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: false, - } - client.AttachToContainer(opts) - expected := map[string][]string{ - "stdin": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "stream": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) - } - if stdout.String() != "hello" { - t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stdout.String()) - } - if stderr.String() != "hello!" { - t.Errorf("AttachToContainer: wrong content written to stderr. Want %q. Got %q.", "hello!", stderr.String()) - } -} - -func TestAttachToContainerWithoutContainer(t *testing.T) { - var client Client - err := client.AttachToContainer(AttachToContainerOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("AttachToContainer: wrong error. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestLogs(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"all"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestLogsNilStdoutDoesntFail(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsOptions{ - Container: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestLogsNilStderrDoesntFail(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsOptions{ - Container: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestLogsSpecifyingTail(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"100"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestLogsRawTerminal(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - RawTerminal: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } -} - -func TestLogsNoContainer(t *testing.T) { - var client Client - err := client.Logs(LogsOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) - } -} - -func TestNoSuchContainerError(t *testing.T) { - var err = &NoSuchContainer{ID: "i345"} - expected := "No such container: i345" - if got := err.Error(); got != expected { - t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) - } -} - -func TestNoSuchContainerErrorMessage(t *testing.T) { - var err = &NoSuchContainer{ID: "i345", Err: errors.New("some advanced error info")} - expected := "some advanced error info" - if got := err.Error(); got != expected { - t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) - } -} - -func TestExportContainer(t *testing.T) { - content := "exported container tar content" - out := stdoutMock{bytes.NewBufferString(content)} - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} - err := client.ExportContainer(opts) - if err != nil { - t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) - } -} - -func TestExportContainerViaUnixSocket(t *testing.T) { - if runtime.GOOS != "darwin" { - t.Skip(fmt.Sprintf("skipping test on %s", runtime.GOOS)) - } - content := "exported container tar content" - var buf []byte - out := bytes.NewBuffer(buf) - tempSocket := tempfile("export_socket") - defer os.Remove(tempSocket) - endpoint := "unix://" + tempSocket - u, _ := parseEndpoint(endpoint, false) - client := Client{ - HTTPClient: cleanhttp.DefaultClient(), - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - } - listening := make(chan string) - done := make(chan int) - go runStreamConnServer(t, "unix", tempSocket, listening, done) - <-listening // wait for server to start - opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} - err := client.ExportContainer(opts) - <-done // make sure server stopped - if err != nil { - t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("ExportContainer: wrong stdout. Want %#v. 
Got %#v.", content, out.String()) - } -} - -func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int) { - defer close(done) - l, err := net.Listen(network, laddr) - if err != nil { - t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err) - listening <- "" - return - } - defer l.Close() - listening <- l.Addr().String() - c, err := l.Accept() - if err != nil { - t.Logf("Accept failed: %v", err) - return - } - c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content")) - c.Close() -} - -func tempfile(filename string) string { - return os.TempDir() + "/" + filename + "." + strconv.Itoa(os.Getpid()) -} - -func TestExportContainerNoId(t *testing.T) { - client := Client{} - out := stdoutMock{bytes.NewBufferString("")} - err := client.ExportContainer(ExportContainerOptions{OutputStream: out}) - e, ok := err.(*NoSuchContainer) - if !ok { - t.Errorf("ExportContainer: wrong error. Want NoSuchContainer. Got %#v.", e) - } - if e.ID != "" { - t.Errorf("ExportContainer: wrong ID. Want %q. Got %q", "", e.ID) - } -} - -func TestUploadToContainer(t *testing.T) { - content := "File content" - in := stdinMock{bytes.NewBufferString(content)} - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - opts := UploadToContainerOptions{ - Path: "abc", - InputStream: in, - } - err := client.UploadToContainer("a123456", opts) - if err != nil { - t.Errorf("UploadToContainer: caught error %#v while uploading archive to container, expected nil", err) - } - - req := fakeRT.requests[0] - - if req.Method != "PUT" { - t.Errorf("UploadToContainer{Path:abc}: Wrong HTTP method. Want PUT. Got %s", req.Method) - } - - if pathParam := req.URL.Query().Get("path"); pathParam != "abc" { - t.Errorf("ListImages({Path:abc}): Wrong parameter. Want path=abc. Got path=%s", pathParam) - } - -} - -func TestDownloadFromContainer(t *testing.T) { - filecontent := "File content" - client := newTestClient(&FakeRoundTripper{message: filecontent, status: http.StatusOK}) - - var out bytes.Buffer - opts := DownloadFromContainerOptions{ - OutputStream: &out, - } - err := client.DownloadFromContainer("a123456", opts) - if err != nil { - t.Errorf("DownloadFromContainer: caught error %#v while downloading from container, expected nil", err.Error()) - } - if out.String() != filecontent { - t.Errorf("DownloadFromContainer: wrong stdout. Want %#v. Got %#v.", filecontent, out.String()) - } -} - -func TestCopyFromContainer(t *testing.T) { - content := "File content" - out := stdoutMock{bytes.NewBufferString(content)} - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - opts := CopyFromContainerOptions{ - Container: "a123456", - OutputStream: &out, - } - err := client.CopyFromContainer(opts) - if err != nil { - t.Errorf("CopyFromContainer: caught error %#v while copying from container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("CopyFromContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) - } -} - -func TestCopyFromContainerEmptyContainer(t *testing.T) { - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - err := client.CopyFromContainer(CopyFromContainerOptions{}) - _, ok := err.(*NoSuchContainer) - if !ok { - t.Errorf("CopyFromContainer: invalid error returned. 
Want NoSuchContainer, got %#v.", err) - } -} - -func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Warnings": [] -}` - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{AttachStdout: true, AttachStdin: true} - opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} - container, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - if container.Name != "TestCreateContainer" { - t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name) - } -} - -func TestAlwaysRestart(t *testing.T) { - policy := AlwaysRestart() - if policy.Name != "always" { - t.Errorf("AlwaysRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) - } - if policy.MaximumRetryCount != 0 { - t.Errorf("AlwaysRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) - } -} - -func TestRestartOnFailure(t *testing.T) { - const retry = 5 - policy := RestartOnFailure(retry) - if policy.Name != "on-failure" { - t.Errorf("RestartOnFailure(%d): wrong policy name. Want %q. Got %q", retry, "on-failure", policy.Name) - } - if policy.MaximumRetryCount != retry { - t.Errorf("RestartOnFailure(%d): wrong MaximumRetryCount. Want %d. Got %d", retry, retry, policy.MaximumRetryCount) - } -} - -func TestNeverRestart(t *testing.T) { - policy := NeverRestart() - if policy.Name != "no" { - t.Errorf("NeverRestart(): wrong policy name. Want %q. Got %q", "no", policy.Name) - } - if policy.MaximumRetryCount != 0 { - t.Errorf("NeverRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) - } -} - -func TestTopContainer(t *testing.T) { - jsonTop := `{ - "Processes": [ - [ - "ubuntu", - "3087", - "815", - "0", - "01:44", - "?", - "00:00:00", - "cmd1" - ], - [ - "root", - "3158", - "3087", - "0", - "01:44", - "?", - "00:00:01", - "cmd2" - ] - ], - "Titles": [ - "UID", - "PID", - "PPID", - "C", - "STIME", - "TTY", - "TIME", - "CMD" - ] -}` - var expected TopResult - err := json.Unmarshal([]byte(jsonTop), &expected) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0" - fakeRT := &FakeRoundTripper{message: jsonTop, status: http.StatusOK} - client := newTestClient(fakeRT) - processes, err := client.TopContainer(id, "") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(processes, expected) { - t.Errorf("TopContainer: Expected %#v. Got %#v.", expected, processes) - } - if len(processes.Processes) != 2 || len(processes.Processes[0]) != 8 || - processes.Processes[0][7] != "cmd1" { - t.Errorf("TopContainer: Expected process list to include cmd1. Got %#v.", processes) - } - expectedURI := "/containers/" + id + "/top" - if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { - t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) - } -} - -func TestTopContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.TopContainer("abef348", "") - expected := &NoSuchContainer{ID: "abef348"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("TopContainer: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestTopContainerWithPsArgs(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "no such container", status: http.StatusNotFound} - client := newTestClient(fakeRT) - expectedErr := &NoSuchContainer{ID: "abef348"} - if _, err := client.TopContainer("abef348", "aux"); !reflect.DeepEqual(expectedErr, err) { - t.Errorf("TopContainer: Expected %v. Got %v.", expectedErr, err) - } - expectedURI := "/containers/abef348/top?ps_args=aux" - if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { - t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) - } -} - -func TestStatsTimeout(t *testing.T) { - l, err := net.Listen("unix", "/tmp/docker_test.sock") - if err != nil { - t.Fatal(err) - } - received := false - defer l.Close() - go func() { - l.Accept() - received = true - time.Sleep(time.Second) - }() - client, _ := NewClient("unix:///tmp/docker_test.sock") - client.SkipServerVersionCheck = true - errC := make(chan error, 1) - statsC := make(chan *Stats) - done := make(chan bool) - go func() { - errC <- client.Stats(StatsOptions{"c", statsC, true, done, time.Millisecond * 100}) - close(errC) - }() - err = <-errC - e, ok := err.(net.Error) - if !ok || !e.Timeout() { - t.Error("Failed to receive timeout exception") - } - if !received { - t.Fatal("Failed to receive message") - } -} - -func TestStats(t *testing.T) { - jsonStats1 := `{ - "read" : "2015-01-08T22:57:31.547920715Z", - "network" : { - "rx_dropped" : 0, - "rx_bytes" : 648, - "rx_errors" : 0, - "tx_packets" : 8, - "tx_dropped" : 0, - "rx_packets" : 8, - "tx_errors" : 0, - "tx_bytes" : 648 - }, - "memory_stats" : { - "stats" : { - "total_pgmajfault" : 0, - "cache" : 0, - "mapped_file" : 0, - "total_inactive_file" : 0, - "pgpgout" : 414, - "rss" : 6537216, - "total_mapped_file" : 0, - "writeback" : 0, - "unevictable" : 0, - "pgpgin" : 477, - "total_unevictable" : 0, - "pgmajfault" : 0, - "total_rss" : 6537216, - "total_rss_huge" : 6291456, - "total_writeback" : 0, - "total_inactive_anon" : 0, - "rss_huge" : 6291456, - "hierarchical_memory_limit": 189204833, - "total_pgfault" : 964, - "total_active_file" : 0, - "active_anon" : 6537216, - "total_active_anon" : 6537216, - "total_pgpgout" : 414, - "total_cache" : 0, - "inactive_anon" : 0, - "active_file" : 0, - "pgfault" : 964, - "inactive_file" : 0, - "total_pgpgin" : 477 - }, - "max_usage" : 6651904, - "usage" : 6537216, - "failcnt" : 0, - "limit" : 67108864 - }, - "blkio_stats": { - "io_service_bytes_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 428795731968 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 388177920 - } - ], - "io_serviced_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 25994442 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 1734 - } - ], - "io_queue_recursive": [], - "io_service_time_recursive": [], - "io_wait_time_recursive": [], - "io_merged_recursive": [], - "io_time_recursive": [], - "sectors_recursive": [] - }, - "cpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000 - }, - "precpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - 
"system_cpu_usage" : 20091722000000000 - } - }` - // 1 second later, cache is 100 - jsonStats2 := `{ - "read" : "2015-01-08T22:57:32.547920715Z", - "network" : { - "rx_dropped" : 0, - "rx_bytes" : 648, - "rx_errors" : 0, - "tx_packets" : 8, - "tx_dropped" : 0, - "rx_packets" : 8, - "tx_errors" : 0, - "tx_bytes" : 648 - }, - "memory_stats" : { - "stats" : { - "total_pgmajfault" : 0, - "cache" : 100, - "mapped_file" : 0, - "total_inactive_file" : 0, - "pgpgout" : 414, - "rss" : 6537216, - "total_mapped_file" : 0, - "writeback" : 0, - "unevictable" : 0, - "pgpgin" : 477, - "total_unevictable" : 0, - "pgmajfault" : 0, - "total_rss" : 6537216, - "total_rss_huge" : 6291456, - "total_writeback" : 0, - "total_inactive_anon" : 0, - "rss_huge" : 6291456, - "total_pgfault" : 964, - "total_active_file" : 0, - "active_anon" : 6537216, - "total_active_anon" : 6537216, - "total_pgpgout" : 414, - "total_cache" : 0, - "inactive_anon" : 0, - "active_file" : 0, - "pgfault" : 964, - "inactive_file" : 0, - "total_pgpgin" : 477 - }, - "max_usage" : 6651904, - "usage" : 6537216, - "failcnt" : 0, - "limit" : 67108864 - }, - "blkio_stats": { - "io_service_bytes_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 428795731968 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 388177920 - } - ], - "io_serviced_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 25994442 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 1734 - } - ], - "io_queue_recursive": [], - "io_service_time_recursive": [], - "io_wait_time_recursive": [], - "io_merged_recursive": [], - "io_time_recursive": [], - "sectors_recursive": [] - }, - "cpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000 - }, - "precpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000 - } - }` - var expected1 Stats - var expected2 Stats - err := json.Unmarshal([]byte(jsonStats1), &expected1) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal([]byte(jsonStats2), &expected2) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0" - - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(jsonStats1)) - w.Write([]byte(jsonStats2)) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - errC := make(chan error, 1) - statsC := make(chan *Stats) - done := make(chan bool) - go func() { - errC <- client.Stats(StatsOptions{id, statsC, true, done, 0}) - close(errC) - }() - var resultStats []*Stats - for { - stats, ok := <-statsC - if !ok { - break - } - resultStats = append(resultStats, stats) - } - err = <-errC - if err != nil { - t.Fatal(err) - } - if len(resultStats) != 2 { - t.Fatalf("Stats: Expected 2 results. 
Got %d.", len(resultStats)) - } - if !reflect.DeepEqual(resultStats[0], &expected1) { - t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected1, resultStats[0]) - } - if !reflect.DeepEqual(resultStats[1], &expected2) { - t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected2, resultStats[1]) - } - if req.Method != "GET" { - t.Errorf("Stats: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/" + id + "/stats")) - if req.URL.Path != u.Path { - t.Errorf("Stats: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } -} - -func TestStatsContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - statsC := make(chan *Stats) - done := make(chan bool) - err := client.Stats(StatsOptions{"abef348", statsC, true, done, 0}) - expected := &NoSuchContainer{ID: "abef348"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("Stats: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRenameContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := RenameContainerOptions{ID: "something_old", Name: "something_new"} - err := client.RenameContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("RenameContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/something_old/rename?name=something_new")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RenameContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - expectedValues := expectedURL.Query()["name"] - actualValues := req.URL.Query()["name"] - if len(actualValues) != 1 || expectedValues[0] != actualValues[0] { - t.Errorf("RenameContainer: Wrong params in request. Want %q. Got %q.", expectedValues, actualValues) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/fsouza/go-dockerclient/env.go deleted file mode 100644 index c54b0b0e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/env.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package docker - -import ( - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -// Env represents a list of key-pair represented in the form KEY=VALUE. -type Env []string - -// Get returns the string value of the given key. -func (env *Env) Get(key string) (value string) { - return env.Map()[key] -} - -// Exists checks whether the given key is defined in the internal Env -// representation. -func (env *Env) Exists(key string) bool { - _, exists := env.Map()[key] - return exists -} - -// GetBool returns a boolean representation of the given key. The key is false -// whenever its value if 0, no, false, none or an empty string. Any other value -// will be interpreted as true. -func (env *Env) GetBool(key string) (value bool) { - s := strings.ToLower(strings.Trim(env.Get(key), " \t")) - if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { - return false - } - return true -} - -// SetBool defines a boolean value to the given key. 
-func (env *Env) SetBool(key string, value bool) { - if value { - env.Set(key, "1") - } else { - env.Set(key, "0") - } -} - -// GetInt returns the value of the provided key, converted to int. -// -// If the value cannot be represented as an integer, it returns -1. -func (env *Env) GetInt(key string) int { - return int(env.GetInt64(key)) -} - -// SetInt defines an integer value to the given key. -func (env *Env) SetInt(key string, value int) { - env.Set(key, strconv.Itoa(value)) -} - -// GetInt64 returns the value of the provided key, converted to int64. -// -// If the value cannot be represented as an integer, it returns -1. -func (env *Env) GetInt64(key string) int64 { - s := strings.Trim(env.Get(key), " \t") - val, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return -1 - } - return val -} - -// SetInt64 defines an integer (64-bit wide) value to the given key. -func (env *Env) SetInt64(key string, value int64) { - env.Set(key, strconv.FormatInt(value, 10)) -} - -// GetJSON unmarshals the value of the provided key in the provided iface. -// -// iface is a value that can be provided to the json.Unmarshal function. -func (env *Env) GetJSON(key string, iface interface{}) error { - sval := env.Get(key) - if sval == "" { - return nil - } - return json.Unmarshal([]byte(sval), iface) -} - -// SetJSON marshals the given value to JSON format and stores it using the -// provided key. -func (env *Env) SetJSON(key string, value interface{}) error { - sval, err := json.Marshal(value) - if err != nil { - return err - } - env.Set(key, string(sval)) - return nil -} - -// GetList returns a list of strings matching the provided key. It handles the -// list as a JSON representation of a list of strings. -// -// If the given key matches a single string, it will return a list -// containing only the value that matches the key. -func (env *Env) GetList(key string) []string { - sval := env.Get(key) - if sval == "" { - return nil - } - var l []string - if err := json.Unmarshal([]byte(sval), &l); err != nil { - l = append(l, sval) - } - return l -} - -// SetList stores the given list in the provided key, after serializing it to -// JSON format. -func (env *Env) SetList(key string, value []string) error { - return env.SetJSON(key, value) -} - -// Set defines the value of a key to the given string. -func (env *Env) Set(key, value string) { - *env = append(*env, key+"="+value) -} - -// Decode decodes `src` as a json dictionary, and adds each decoded key-value -// pair to the environment. -// -// If `src` cannot be decoded as a json dictionary, an error is returned. -func (env *Env) Decode(src io.Reader) error { - m := make(map[string]interface{}) - if err := json.NewDecoder(src).Decode(&m); err != nil { - return err - } - for k, v := range m { - env.SetAuto(k, v) - } - return nil -} - -// SetAuto will try to choose the Set* method to call based on the given value. -func (env *Env) SetAuto(key string, value interface{}) { - if fval, ok := value.(float64); ok { - env.SetInt64(key, int64(fval)) - } else if sval, ok := value.(string); ok { - env.Set(key, sval) - } else if val, err := json.Marshal(value); err == nil { - env.Set(key, string(val)) - } else { - env.Set(key, fmt.Sprintf("%v", value)) - } -} - -// Map returns the map representation of the env.
-func (env *Env) Map() map[string]string { - if len(*env) == 0 { - return nil - } - m := make(map[string]string) - for _, kv := range *env { - parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = parts[1] - } - return m -} diff --git a/vendor/github.com/fsouza/go-dockerclient/env_test.go b/vendor/github.com/fsouza/go-dockerclient/env_test.go deleted file mode 100644 index df5169d0..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/env_test.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package docker - -import ( - "bytes" - "errors" - "reflect" - "sort" - "testing" -) - -func TestGet(t *testing.T) { - var tests = []struct { - input []string - query string - expected string - }{ - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""}, - {[]string{"WAT="}, "WAT", ""}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Get(tt.query) - if got != tt.expected { - t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got) - } - } -} - -func TestExists(t *testing.T) { - var tests = []struct { - input []string - query string - expected bool - }{ - {[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Exists(tt.query) - if got != tt.expected { - t.Errorf("Env.Exists(%q): wrong result. Want %v. Got %v", tt.query, tt.expected, got) - } - } -} - -func TestGetBool(t *testing.T) { - var tests = []struct { - input string - expected bool - }{ - {"EMPTY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false}, - {"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true}, - {"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false}, - } - env := Env([]string{ - "EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false", - "NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin", - "ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t", - }) - for _, tt := range tests { - got := env.GetBool(tt.input) - if got != tt.expected { - t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got) - } - } -} - -func TestSetBool(t *testing.T) { - var tests = []struct { - input bool - expected string - }{ - {true, "1"}, {false, "0"}, - } - for _, tt := range tests { - var env Env - env.SetBool("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetInt(t *testing.T) { - var tests = []struct { - input string - expected int - }{ - {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, - } - env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) - for _, tt := range tests { - got := env.GetInt(tt.input) - if got != tt.expected { - t.Errorf("Env.GetInt(%q): wrong result. Want %d.
Got %d", tt.input, tt.expected, got) - } - } -} - -func TestSetInt(t *testing.T) { - var tests = []struct { - input int - expected string - }{ - {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, - {0, "0"}, {-34, "-34"}, - } - for _, tt := range tests { - var env Env - env.SetInt("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetInt64(t *testing.T) { - var tests = []struct { - input string - expected int64 - }{ - {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, - } - env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) - for _, tt := range tests { - got := env.GetInt64(tt.input) - if got != tt.expected { - t.Errorf("Env.GetInt64(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got) - } - } -} - -func TestSetInt64(t *testing.T) { - var tests = []struct { - input int64 - expected string - }{ - {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, - {0, "0"}, {-34, "-34"}, - } - for _, tt := range tests { - var env Env - env.SetInt64("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetJSON(t *testing.T) { - var p struct { - Name string `json:"name"` - Age int `json:"age"` - } - var env Env - env.Set("person", `{"name":"Gopher","age":5}`) - err := env.GetJSON("person", &p) - if err != nil { - t.Error(err) - } - if p.Name != "Gopher" { - t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name) - } - if p.Age != 5 { - t.Errorf("Env.GetJSON(%q): wrong age. Want %d. Got %d", "person", 5, p.Age) - } -} - -func TestGetJSONAbsent(t *testing.T) { - var l []string - var env Env - err := env.GetJSON("person", &l) - if err != nil { - t.Error(err) - } - if l != nil { - t.Errorf("Env.GetJSON(): get unexpected list %v", l) - } -} - -func TestGetJSONFailure(t *testing.T) { - var p []string - var env Env - env.Set("list-person", `{"name":"Gopher","age":5}`) - err := env.GetJSON("list-person", &p) - if err == nil { - t.Errorf("Env.GetJSON(%q): got unexpected error.", "list-person") - } -} - -func TestSetJSON(t *testing.T) { - var p1 = struct { - Name string `json:"name"` - Age int `json:"age"` - }{Name: "Gopher", Age: 5} - var env Env - err := env.SetJSON("person", p1) - if err != nil { - t.Error(err) - } - var p2 struct { - Name string `json:"name"` - Age int `json:"age"` - } - err = env.GetJSON("person", &p2) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(p1, p2) { - t.Errorf("Env.SetJSON(%q): wrong result. Want %v. Got %v", "person", p1, p2) - } -} - -func TestSetJSONFailure(t *testing.T) { - var env Env - err := env.SetJSON("person", unmarshable{}) - if err == nil { - t.Error("Env.SetJSON(): got unexpected error") - } - if env.Exists("person") { - t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person") - } -} - -func TestGetList(t *testing.T) { - var tests = []struct { - input string - expected []string - }{ - {"WAT=wat", []string{"wat"}}, - {`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}}, - {"WAT=", nil}, - } - for _, tt := range tests { - env := Env([]string{tt.input}) - got := env.GetList("WAT") - if !reflect.DeepEqual(got, tt.expected) { - t.Errorf("Env.GetList(%q): wrong result. Want %v. 
Got %v", "WAT", tt.expected, got) - } - } -} - -func TestSetList(t *testing.T) { - list := []string{"a", "b", "c"} - var env Env - if err := env.SetList("SOME", list); err != nil { - t.Error(err) - } - if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) { - t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got) - } -} - -func TestSet(t *testing.T) { - var env Env - env.Set("PATH", "/home/bin:/bin") - env.Set("SOMETHING", "/usr/bin") - env.Set("PATH", "/bin") - if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected { - t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) - } - if expected, got := "/bin", env.Get("PATH"); got != expected { - t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) - } -} - -func TestDecode(t *testing.T) { - var tests = []struct { - input string - expectedOut []string - expectedErr string - }{ - { - `{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`, - []string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`}, - "", - }, - {"}}", nil, "invalid character '}' looking for beginning of value"}, - {`{}`, nil, ""}, - } - for _, tt := range tests { - var env Env - err := env.Decode(bytes.NewBufferString(tt.input)) - if tt.expectedErr == "" { - if err != nil { - t.Error(err) - } - } else if tt.expectedErr != err.Error() { - t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err) - } - got := []string(env) - sort.Strings(got) - sort.Strings(tt.expectedOut) - if !reflect.DeepEqual(got, tt.expectedOut) { - t.Errorf("Env.Decode(): wrong result. Want %v. Got %v.", tt.expectedOut, got) - } - } -} - -func TestSetAuto(t *testing.T) { - buf := bytes.NewBufferString("oi") - var tests = []struct { - input interface{} - expected string - }{ - {10, "10"}, - {10.3, "10"}, - {"oi", "oi"}, - {buf, "{}"}, - {unmarshable{}, "{}"}, - } - for _, tt := range tests { - var env Env - env.SetAuto("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestMap(t *testing.T) { - var tests = []struct { - input []string - expected map[string]string - }{ - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}}, - {nil, nil}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Map() - if !reflect.DeepEqual(got, tt.expected) { - t.Errorf("Env.Map(): wrong result. Want %v. Got %v", tt.expected, got) - } - } -} - -type unmarshable struct { -} - -func (unmarshable) MarshalJSON() ([]byte, error) { - return nil, errors.New("cannot marshal") -} diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go deleted file mode 100644 index eaffddb8..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/event.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "net" - "net/http" - "net/http/httputil" - "sync" - "sync/atomic" - "time" -) - -// APIEvents represents an event returned by the API. 
-type APIEvents struct { - Status string `json:"Status,omitempty" yaml:"Status,omitempty"` - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - From string `json:"From,omitempty" yaml:"From,omitempty"` - Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"` -} - -type eventMonitoringState struct { - sync.RWMutex - sync.WaitGroup - enabled bool - lastSeen *int64 - C chan *APIEvents - errC chan error - listeners []chan<- *APIEvents -} - -const ( - maxMonitorConnRetries = 5 - retryInitialWaitTime = 10. -) - -var ( - // ErrNoListeners is the error returned when no listeners are available - // to receive an event. - ErrNoListeners = errors.New("no listeners present to receive event") - - // ErrListenerAlreadyExists is the error returned when the listener already - // exists. - ErrListenerAlreadyExists = errors.New("listener already exists for docker events") - - // EOFEvent is sent when the event listener receives an EOF error. - EOFEvent = &APIEvents{ - Status: "EOF", - } -) - -// AddEventListener adds a new listener to container events in the Docker API. -// -// The parameter is a channel through which events will be sent. -func (c *Client) AddEventListener(listener chan<- *APIEvents) error { - var err error - if !c.eventMonitor.isEnabled() { - err = c.eventMonitor.enableEventMonitoring(c) - if err != nil { - return err - } - } - err = c.eventMonitor.addListener(listener) - if err != nil { - return err - } - return nil -} - -// RemoveEventListener removes a listener from the monitor. -func (c *Client) RemoveEventListener(listener chan *APIEvents) error { - err := c.eventMonitor.removeListener(listener) - if err != nil { - return err - } - if len(c.eventMonitor.listeners) == 0 { - c.eventMonitor.disableEventMonitoring() - } - return nil -} - -func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - return ErrListenerAlreadyExists - } - eventState.Add(1) - eventState.listeners = append(eventState.listeners, listener) - return nil -} - -func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - var newListeners []chan<- *APIEvents - for _, l := range eventState.listeners { - if l != listener { - newListeners = append(newListeners, l) - } - } - eventState.listeners = newListeners - eventState.Add(-1) - } - return nil -} - -func (eventState *eventMonitoringState) closeListeners() { - for _, l := range eventState.listeners { - close(l) - eventState.Add(-1) - } - eventState.listeners = nil -} - -func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { - for _, b := range *list { - if b == a { - return true - } - } - return false -} - -func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { - eventState.Lock() - defer eventState.Unlock() - if !eventState.enabled { - eventState.enabled = true - var lastSeenDefault = int64(0) - eventState.lastSeen = &lastSeenDefault - eventState.C = make(chan *APIEvents, 100) - eventState.errC = make(chan error, 1) - go eventState.monitorEvents(c) - } - return nil -} - -func (eventState *eventMonitoringState) disableEventMonitoring() error { - eventState.Lock() - defer eventState.Unlock() - - eventState.closeListeners() - - eventState.Wait() - - if eventState.enabled { - eventState.enabled = false - close(eventState.C) - close(eventState.errC) - } -
return nil -} - -func (eventState *eventMonitoringState) monitorEvents(c *Client) { - var err error - for eventState.noListeners() { - time.Sleep(10 * time.Millisecond) - } - if err = eventState.connectWithRetry(c); err != nil { - // terminate if connect failed - eventState.disableEventMonitoring() - return - } - for eventState.isEnabled() { - timeout := time.After(100 * time.Millisecond) - select { - case ev, ok := <-eventState.C: - if !ok { - return - } - if ev == EOFEvent { - eventState.disableEventMonitoring() - return - } - eventState.updateLastSeen(ev) - go eventState.sendEvent(ev) - case err = <-eventState.errC: - if err == ErrNoListeners { - eventState.disableEventMonitoring() - return - } else if err != nil { - defer func() { go eventState.monitorEvents(c) }() - return - } - case <-timeout: - continue - } - } -} - -func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { - var retries int - var err error - for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ { - waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) - time.Sleep(time.Duration(waitTime) * time.Millisecond) - err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC) - } - return err -} - -func (eventState *eventMonitoringState) noListeners() bool { - eventState.RLock() - defer eventState.RUnlock() - return len(eventState.listeners) == 0 -} - -func (eventState *eventMonitoringState) isEnabled() bool { - eventState.RLock() - defer eventState.RUnlock() - return eventState.enabled -} - -func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { - eventState.RLock() - defer eventState.RUnlock() - eventState.Add(1) - defer eventState.Done() - if eventState.enabled { - if len(eventState.listeners) == 0 { - eventState.errC <- ErrNoListeners - return - } - - for _, listener := range eventState.listeners { - listener <- event - } - } -} - -func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { - eventState.Lock() - defer eventState.Unlock() - if atomic.LoadInt64(eventState.lastSeen) < e.Time { - atomic.StoreInt64(eventState.lastSeen, e.Time) - } -} - -func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { - uri := "/events" - if startTime != 0 { - uri += fmt.Sprintf("?since=%d", startTime) - } - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != "unix" { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - var err error - if c.TLSConfig == nil { - dial, err = c.Dialer.Dial(protocol, address) - } else { - dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) - } - if err != nil { - return err - } - conn := httputil.NewClientConn(dial, nil) - req, err := http.NewRequest("GET", uri, nil) - if err != nil { - return err - } - res, err := conn.Do(req) - if err != nil { - return err - } - go func(res *http.Response, conn *httputil.ClientConn) { - defer conn.Close() - defer res.Body.Close() - decoder := json.NewDecoder(res.Body) - for { - var event APIEvents - if err = decoder.Decode(&event); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - if c.eventMonitor.isEnabled() { - // Signal that we're exiting. 
- eventChan <- EOFEvent - } - break - } - errChan <- err - } - if event.Time == 0 { - continue - } - if !c.eventMonitor.isEnabled() { - return - } - eventChan <- &event - } - }(res, conn) - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/event_test.go b/vendor/github.com/fsouza/go-dockerclient/event_test.go deleted file mode 100644 index a308538c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/event_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bufio" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" -) - -func TestEventListeners(t *testing.T) { - testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient) -} - -func TestTLSEventListeners(t *testing.T) { - testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server { - server := httptest.NewUnstartedServer(handler) - - cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem") - if err != nil { - t.Fatalf("Error loading server key pair: %s", err) - } - - caCert, err := ioutil.ReadFile("testing/data/ca.pem") - if err != nil { - t.Fatalf("Error loading ca certificate: %s", err) - } - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caCert) { - t.Fatalf("Could not add ca certificate") - } - - server.TLS = &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caPool, - } - server.StartTLS() - return server - }, func(url string) (*Client, error) { - return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem") - }) -} - -func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) { - response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} -{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} -{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} -{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} -` - - server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - rsc := bufio.NewScanner(strings.NewReader(response)) - for rsc.Scan() { - w.Write([]byte(rsc.Text())) - w.(http.Flusher).Flush() - time.Sleep(10 * time.Millisecond) - } - })) - defer server.Close() - - client, err := buildClient(server.URL) - if err != nil { - t.Errorf("Failed to create client: %s", err) - } - client.SkipServerVersionCheck = true - - listener := make(chan *APIEvents, 10) - defer func() { - time.Sleep(10 * time.Millisecond) - if err := client.RemoveEventListener(listener); err != nil { - t.Error(err) - } - }() - - err = client.AddEventListener(listener) - if err != nil { - t.Errorf("Failed to add event listener: %s", err) - } - - timeout := time.After(1 * time.Second) - var count int - - for { - select { - case msg := <-listener: - t.Logf("Received: %v", *msg) - count++ - err = checkEvent(count, msg) - if err != nil { - t.Fatalf("Check event failed: %s", err) - } - if count == 4 { - return - } - case <-timeout: - t.Fatalf("%s timed out waiting on events", testName) - } - } -} - -func checkEvent(index int, event *APIEvents) error { - if event.ID != "dfdf82bd3881" { - return fmt.Errorf("event ID did not match. 
Expected dfdf82bd3881 got %s", event.ID) - } - if event.From != "base:latest" { - return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From) - } - var status string - switch index { - case 1: - status = "create" - case 2: - status = "start" - case 3: - status = "stop" - case 4: - status = "destroy" - } - if event.Status != status { - return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status) - } - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/example_test.go b/vendor/github.com/fsouza/go-dockerclient/example_test.go deleted file mode 100644 index 8c2c719e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/example_test.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker_test - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "log" - "time" - - "github.com/fsouza/go-dockerclient" -) - -func ExampleClient_AttachToContainer() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - client.SkipServerVersionCheck = true - // Reading logs from container a84849 and sending them to buf. - var buf bytes.Buffer - err = client.AttachToContainer(docker.AttachToContainerOptions{ - Container: "a84849", - OutputStream: &buf, - Logs: true, - Stdout: true, - Stderr: true, - }) - if err != nil { - log.Fatal(err) - } - log.Println(buf.String()) - buf.Reset() - err = client.AttachToContainer(docker.AttachToContainerOptions{ - Container: "a84849", - OutputStream: &buf, - Stdout: true, - Stream: true, - }) - if err != nil { - log.Fatal(err) - } - log.Println(buf.String()) -} - -func ExampleClient_CopyFromContainer() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - cid := "a84849" - var buf bytes.Buffer - filename := "/tmp/output.txt" - err = client.CopyFromContainer(docker.CopyFromContainerOptions{ - Container: cid, - Resource: filename, - OutputStream: &buf, - }) - if err != nil { - log.Fatalf("Error while copying from %s: %s\n", cid, err) - } - content := new(bytes.Buffer) - r := bytes.NewReader(buf.Bytes()) - tr := tar.NewReader(r) - tr.Next() - if err != nil && err != io.EOF { - log.Fatal(err) - } - if _, err := io.Copy(content, tr); err != nil { - log.Fatal(err) - } - log.Println(buf.String()) -} - -func ExampleClient_BuildImage() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - - t := time.Now() - inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) - tr := tar.NewWriter(inputbuf) - tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t}) - tr.Write([]byte("FROM base\n")) - tr.Close() - opts := docker.BuildImageOptions{ - Name: "test", - InputStream: inputbuf, - OutputStream: outputbuf, - } - if err := client.BuildImage(opts); err != nil { - log.Fatal(err) - } -} - -func ExampleClient_ListenEvents() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - - listener := make(chan *docker.APIEvents) - err = client.AddEventListener(listener) - if err != nil { - log.Fatal(err) - } - - defer func() { - - err = client.RemoveEventListener(listener) - if err != nil { - log.Fatal(err) - } - - }() - - timeout := time.After(1 * time.Second) - - for { - select { - case msg := <-listener: - log.Println(msg) - case 
<-timeout: - return - } - } - -} - -func ExampleEnv_Map() { - e := docker.Env([]string{"A=1", "B=2", "C=3"}) - envs := e.Map() - for k, v := range envs { - fmt.Printf("%s=%q\n", k, v) - } -} - -func ExampleEnv_SetJSON() { - type Person struct { - Name string - Age int - } - p := Person{Name: "Gopher", Age: 4} - var e docker.Env - err := e.SetJSON("person", p) - if err != nil { - log.Fatal(err) - } -} - -func ExampleEnv_GetJSON() { - type Person struct { - Name string - Age int - } - p := Person{Name: "Gopher", Age: 4} - var e docker.Env - e.Set("person", `{"name":"Gopher","age":4}`) - err := e.GetJSON("person", &p) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go deleted file mode 100644 index f3b705fa..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/exec.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" -) - -// Exec is the type representing a `docker exec` instance and containing the -// instance ID. -type Exec struct { - ID string `json:"Id,omitempty" yaml:"Id,omitempty"` -} - -// CreateExecOptions specify parameters to the CreateExec function. -// -// See https://goo.gl/1KSIb7 for more details -type CreateExecOptions struct { - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty"` -} - -// CreateExec sets up an exec instance in a running container `id`, returning the exec -// instance, or an error in case of failure. -// -// See https://goo.gl/1KSIb7 for more details
func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { - path := fmt.Sprintf("/containers/%s/exec", opts.Container) - resp, err := c.do("POST", path, doOptions{data: opts}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - return nil, err - } - defer resp.Body.Close() - var exec Exec - if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { - return nil, err - } - - return &exec, nil -} - -// StartExecOptions specify parameters to the StartExec function. -// -// See https://goo.gl/iQCnto for more details -type StartExecOptions struct { - Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"` - - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} `json:"-"` -} - -// StartExec starts a previously set up exec instance id.
If opts.Detach is -// true, it returns after starting the exec command. Otherwise, it sets up an -// interactive session with the exec command. -// -// See https://goo.gl/iQCnto for more details -func (c *Client) StartExec(id string, opts StartExecOptions) error { - if id == "" { - return &NoSuchExec{ID: id} - } - - path := fmt.Sprintf("/exec/%s/start", id) - - if opts.Detach { - resp, err := c.do("POST", path, doOptions{data: opts}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchExec{ID: id} - } - return err - } - defer resp.Body.Close() - return nil - } - - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - data: opts, - }) -} - -// ResizeExecTTY resizes the tty session used by the exec command id. This API -// is valid only if Tty was specified as part of creating and starting the exec -// command. -// -// See https://goo.gl/e1JpsA for more details -func (c *Client) ResizeExecTTY(id string, height, width int) error { - params := make(url.Values) - params.Set("h", strconv.Itoa(height)) - params.Set("w", strconv.Itoa(width)) - - path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode()) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ExecProcessConfig is a type describing the command associated with an Exec -// instance. It's used in the ExecInspect type. -type ExecProcessConfig struct { - Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` - User string `json:"user,omitempty" yaml:"user,omitempty"` - Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"` - EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"` - Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"` -} - -// ExecInspect is a type with details about an exec instance, including the -// exit code if the command has finished running. It's returned by an API -// call to /exec/(id)/json -// -// See https://goo.gl/gPtX9R for more details -type ExecInspect struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` - OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"` - OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"` - ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"` - Container Container `json:"Container,omitempty" yaml:"Container,omitempty"` -} - -// InspectExec returns low-level information about the exec command id. -// -// See https://goo.gl/gPtX9R for more details -func (c *Client) InspectExec(id string) (*ExecInspect, error) { - path := fmt.Sprintf("/exec/%s/json", id) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchExec{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var exec ExecInspect - if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { - return nil, err - } - return &exec, nil -} - -// NoSuchExec is the error returned when a given exec instance does not exist.
-type NoSuchExec struct { - ID string -} - -func (err *NoSuchExec) Error() string { - return "No such exec instance: " + err.ID -} diff --git a/vendor/github.com/fsouza/go-dockerclient/exec_test.go b/vendor/github.com/fsouza/go-dockerclient/exec_test.go deleted file mode 100644 index 2dc8d210..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/exec_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "strings" - "testing" -) - -func TestExecCreate(t *testing.T) { - jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` - var expected struct{ ID string } - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := CreateExecOptions{ - Container: "test", - AttachStdin: true, - AttachStdout: true, - AttachStderr: false, - Tty: false, - Cmd: []string{"touch", "/tmp/file"}, - User: "a-user", - } - execObj, err := client.CreateExec(config) - if err != nil { - t.Fatal(err) - } - expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if execObj.ID != expectedID { - t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/test/exec")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody struct{ ID string } - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestExecStartDetached(t *testing.T) { - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - config := StartExecOptions{ - Detach: true, - } - err := client.StartExec(execID, config) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecCreate: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) - } - t.Log(req.Body) - var gotBody struct{ Detach bool } - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - if !gotBody.Detach { - t.Fatal("Expected Detach in StartExecOptions to be true") - } -} - -func TestExecStartAndAttach(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - success := make(chan struct{}) - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := StartExecOptions{ - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - RawTerminal: true, - Success: success, - } - go func() { - if err := client.StartExec(execID, opts); err != nil { - t.Error(err) - } - }() - <-success -} - -func TestExecResize(t *testing.T) { - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - err := client.ResizeExecTTY(execID, 10, 20) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20")) - if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() { - t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } -} - -func TestExecInspect(t *testing.T) { - jsonExec := `{ - "ID": "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e", - "Running": true, - "ExitCode": 0, - "ProcessConfig": { - "privileged": false, - "user": "", - "tty": true, - "entrypoint": "bash", - "arguments": [] - }, - "OpenStdin": true, - "OpenStderr": true, - "OpenStdout": true, - "Container": { - "State": { - "Running": true, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Pid": 29392, - "ExitCode": 0, - "Error": "", - "StartedAt": "2015-01-21T17:08:59.634662178Z", - "FinishedAt": "0001-01-01T00:00:00Z" - }, - "ID": "922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521", - "Created": "2015-01-21T17:08:59.46407212Z", - "Path": "/bin/bash", - "Args": [ - "-lc", - "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" - ], - "Config": { - "Hostname": "922cd0568714", - "Domainname": "", - "User": "ubuntu", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 100, - "Cpuset": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "PortSpecs": null, - "ExposedPorts": { - "8888/tcp": {} - }, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "Cmd": [ - "/bin/bash", - "-lc", - "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" - ], - "Image": "tsuru/app-gtest", - "Volumes": null, - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null - }, - "Image": "a88060b8b54fde0f7168c86742d0ce83b80f3f10925d85c98fdad9ed00bef544", - "NetworkSettings": { - "IPAddress": "172.17.0.8", - 
"IPPrefixLen": 16, - "MacAddress": "02:42:ac:11:00:08", - "LinkLocalIPv6Address": "fe80::42:acff:fe11:8", - "LinkLocalIPv6PrefixLen": 64, - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "Gateway": "172.17.42.1", - "IPv6Gateway": "", - "Bridge": "docker0", - "PortMapping": null, - "Ports": { - "8888/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49156" - } - ] - } - }, - "ResolvConfPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/resolv.conf", - "HostnamePath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hostname", - "HostsPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hosts", - "Name": "/c7e43b72288ee9d0270a", - "Driver": "aufs", - "ExecDriver": "native-0.2", - "MountLabel": "", - "ProcessLabel": "", - "AppArmorProfile": "", - "RestartCount": 0, - "UpdateDns": false, - "Volumes": {}, - "VolumesRW": {} - } - }` - var expected ExecInspect - err := json.Unmarshal([]byte(jsonExec), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK} - client := newTestClient(fakeRT) - expectedID := "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e" - execObj, err := client.InspectExec(expectedID) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*execObj, expected) { - t.Errorf("ExecInspect: Expected %#v. Got %#v.", expected, *execObj) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecInspect: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index ecc84327..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,55 +0,0 @@ -# 0.9.0 (Unreleased) - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 53d27d44..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,358 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(log.TextFormatter)` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely API-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above.
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus, using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: the Syslog hook also supports connecting to local syslog (e.g. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md). - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3.
Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. 
- -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. - - ```go - logrus.SetFormatter(&logstash.LogstashFormatter{Type: “application_name"}) - ``` - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. 
- - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go deleted file mode 100644 index dddd5f87..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/Sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/Sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 699ea035..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,254 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. 
-func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) 
-} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go deleted file mode 100644 index cd90aa7d..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" -) - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b80..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. 
-func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) 
-} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index c6d290c7..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package logrus - -import ( - "fmt" - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -var errorFields = Fields{ - "foo": fmt.Errorf("bar"), - "baz": fmt.Errorf("qux"), -} - -func BenchmarkErrorTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, 
smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 938b9749..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 
3f151cdc..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d708732..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index e4974bfb..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,206 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. 
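The doc comment above shows a struct-literal instantiation, but as written it is README-style pseudocode: the `logrus.`-qualified names only resolve from outside the package. A compilable version of the same idea, added here as an editorial sketch:

```go
package main

import (
	"os"

	logrus "github.com/Sirupsen/logrus"
)

func main() {
	// Equivalent to logrus.New(), but with an explicit formatter and level.
	log := &logrus.Logger{
		Out:       os.Stderr,
		Formatter: new(logrus.JSONFormatter),
		Hooks:     make(logrus.LevelHooks),
		Level:     logrus.DebugLevel,
	}
	log.Debug("visible because Level is DebugLevel")
}
```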
-func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) 
- } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e9..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
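An editorial sketch of the pattern this comment describes, using the `StdLogger` interface declared immediately below: a library function that depends only on `StdLogger` can be handed either logger implementation. The function name `process` and its message are hypothetical:

```go
package main

import (
	"log"
	"os"

	logrus "github.com/Sirupsen/logrus"
)

// process depends only on the StdLogger subset, so callers can pass
// either a stdlib *log.Logger or a *logrus.Logger.
func process(l logrus.StdLogger) {
	l.Printf("processed %d items", 3)
}

func main() {
	process(log.New(os.Stderr, "stdlib: ", log.LstdFlags)) // *log.Logger
	process(logrus.New())                                  // *logrus.Logger
}
```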
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index e8719b09..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func 
TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - 
assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 4bb53760..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 06ef2023..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return false - } - } - return true -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", value) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index e25a44f6..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "testing" - "time" -) - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") - checkQuoting(true, "x y") - 
checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) -} - -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c7..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go deleted file mode 100644 index ba8b4f20..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go +++ /dev/null @@ -1,67 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "os" - "strings" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// ``Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase -// letters, digits, and the '_' (underscore) from the characters defined in -// Portable Character Set and do not begin with a digit. *But*, other -// characters may be permitted by an implementation; applications shall -// tolerate the presence of such names.'' -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html -// -// As of #16585, it's up to application inside docker to validate or not -// environment variables, that's why we just strip leading whitespace and -// nothing more. 
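An editorial usage sketch for `ParseEnvFile`, whose implementation follows below. The `.env` path and file contents are hypothetical, and the import assumes the package is reachable at its vendored path:

```go
package main

import (
	"fmt"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// For a file containing:
	//   FOO=bar
	//   # comments and blank lines are skipped
	//   HOME
	// ParseEnvFile returns []string{"FOO=bar", "HOME=<value of $HOME>"}:
	// lines with '=' pass through untouched, while bare names are
	// resolved against the current environment.
	vars, err := opts.ParseEnvFile(".env") // hypothetical path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, kv := range vars {
		fmt.Println(kv)
	}
}
```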
-func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - for scanner.Scan() { - // trim the line from all leading whitespace first - line := strings.TrimLeft(scanner.Text(), whiteSpaces) - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. - lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable typed error for bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go deleted file mode 100644 index a172267b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" -) - -func tmpFileWithContent(content string, t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "envfile-test") - if err != nil { - t.Fatal(err) - } - defer tmpFile.Close() - - tmpFile.WriteString(content) - return tmpFile.Name() -} - -// Test ParseEnvFile for a file with a few well formatted lines -func TestParseEnvFileGoodFile(t *testing.T) { - content := `foo=bar - baz=quux -# comment - -_foobar=foobaz -with.dots=working -and_underscore=working too -` - // Adding a newline + a line with pure whitespace. - // This is being done like this instead of the block above - // because it's common for editors to trim trailing whitespace - // from lines, which becomes annoying since that's the - // exact thing we need to test. 
- content += "\n \t " - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - expectedLines := []string{ - "foo=bar", - "baz=quux", - "_foobar=foobaz", - "with.dots=working", - "and_underscore=working too", - } - - if !reflect.DeepEqual(lines, expectedLines) { - t.Fatal("lines not equal to expected_lines") - } -} - -// Test ParseEnvFile for an empty file -func TestParseEnvFileEmptyFile(t *testing.T) { - tmpFile := tmpFileWithContent("", t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if len(lines) != 0 { - t.Fatal("lines not empty; expected empty") - } -} - -// Test ParseEnvFile for a non existent file -func TestParseEnvFileNonExistentFile(t *testing.T) { - _, err := ParseEnvFile("foo_bar_baz") - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } - if _, ok := err.(*os.PathError); !ok { - t.Fatalf("Expected a PathError, got [%v]", err) - } -} - -// Test ParseEnvFile for a badly formatted file -func TestParseEnvFileBadlyFormattedFile(t *testing.T) { - content := `foo=bar - f =quux -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected a ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} - -// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize -func TestParseEnvFileLineTooLongFile(t *testing.T) { - content := strings.Repeat("a", bufio.MaxScanTokenSize+42) - content = fmt.Sprint("foo=", content) - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } -} - -// ParseEnvFile with a random file, pass through -func TestParseEnvFileRandomFile(t *testing.T) { - content := `first line -another invalid line` - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - - if err == nil { - t.Fatalf("Expected a ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go deleted file mode 100644 index 611407a9..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go 
deleted file mode 100644 index ec52e9a7..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = DefaultTCPHost diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go deleted file mode 100644 index d787b56c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parsable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go deleted file mode 100644 index 1027d84a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package opts - -import ( - "net" - "testing" -) - -func TestIpOptString(t *testing.T) { - addresses := []string{"", "0.0.0.0"} - var ip net.IP - - for _, address := range addresses { - stringAddress := NewIPOpt(&ip, address).String() - if stringAddress != address { - t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) - } - } -} - -func TestNewIpOptInvalidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "Not an ip" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "127.0.0.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestNewIpOptValidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "192.168.1.1" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "192.168.1.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestIpOptSetInvalidVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - ipOpt := &IPOpt{IP: &ip} - - invalidIP := "invalid ip" - expectedError := "invalid ip is not an ip address" - err := ipOpt.Set(invalidIP) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go deleted file mode 100644 index 
df85a09e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go +++ /dev/null @@ -1,360 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "os" - "path" - "regexp" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 - DefaultHTTPHost = "localhost" - - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// - // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter - // is not supplied. A better longer term solution would be to use a named - // pipe as the default on the Windows daemon. - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) -) - -// ListOpts holds a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string((*opts.values))) -} - -// Set validates if needed the input value and add it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of slice. -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// Get checks the existence of the specified key. 
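ListOpts implements String and Set, i.e. the standard library's flag.Value interface, which is how docker accepts repeatable flags such as -e and -v. A hedged sketch of the same pattern with Go's flag package (stringList and the flag names are illustrative, not from this diff):

    package main

    import (
        "flag"
        "fmt"
        "strings"
    )

    // stringList is a minimal flag.Value in the spirit of ListOpts:
    // each occurrence of the flag appends one validated value.
    type stringList []string

    func (s *stringList) String() string { return fmt.Sprintf("%v", *s) }

    func (s *stringList) Set(v string) error {
        if strings.TrimSpace(v) == "" {
            return fmt.Errorf("empty value")
        }
        *s = append(*s, v)
        return nil
    }

    func main() {
        var envs stringList
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        fs.Var(&envs, "e", "set environment variables (repeatable)")
        _ = fs.Parse([]string{"-e", "FOO=bar", "-e", "BAZ=qux"})
        fmt.Println(envs) // [FOO=bar BAZ=qux]
    }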
-func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -//MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateAttach validates that the specified string is a valid attach option. -func ValidateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := parsers.ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). -func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. 
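The device-mode grammar documented above is small: a non-empty combination of 'r', 'w' and 'm' with no repeats, enforced by consuming each legal rune once. A standalone sketch of that check (validDeviceMode here is illustrative, not the vendored symbol):

    package main

    import "fmt"

    // validDeviceMode re-states the rule above: each of 'r', 'w', 'm'
    // may appear at most once, and anything else is rejected.
    func validDeviceMode(mode string) bool {
        allowed := map[rune]bool{'r': true, 'w': true, 'm': true}
        if mode == "" {
            return false
        }
        for _, c := range mode {
            if !allowed[c] {
                return false // unknown or repeated flag
            }
            allowed[c] = false
        }
        return true
    }

    func main() {
        for _, m := range []string{"rwm", "rw", "rr", "", "x"} {
            fmt.Printf("%-5q -> %v\n", m, validDeviceMode(m))
        }
    }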
-func ValidateDevice(val string) (string, error) { - return validatePath(val, ValidDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, fmt.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, fmt.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it returns the current value using os.Getenv. -// -// As on ParseEnvFile and related to #16585, environment variable names -// are not validate what so ever, it's up to application inside docker -// to validate them or not. -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -// ValidateIPAddress validates an Ip address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. 
-// Labels are in the form on key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - _, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) - if err != nil { - return val, err - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultHost, val string) (string, error) { - host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) - if err != nil { - return val, err - } - return host, nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go deleted file mode 100644 index e02d3f8e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go +++ /dev/null @@ -1,427 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "runtime" - "strings" - "testing" -) - -func TestValidateIPAddress(t *testing.T) { - if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) - } - -} - -func TestMapOpts(t *testing.T) { - tmpMap := make(map[string]string) - o := NewMapOpts(tmpMap, logOptsValidator) - o.Set("max-size=1") - if o.String() != "map[max-size:1]" { - t.Errorf("%s != [map[max-size:1]", o.String()) - } - - o.Set("max-file=2") - if len(tmpMap) != 2 { - t.Errorf("map length %d != 2", len(tmpMap)) - } - - if tmpMap["max-file"] != "2" { - t.Errorf("max-file = %s != 2", tmpMap["max-file"]) - } - - if tmpMap["max-size"] != "1" { - t.Errorf("max-size = %s != 1", tmpMap["max-size"]) - } - if o.Set("dummy-val=3") == nil { - t.Errorf("validator is not being called") - } -} - -func TestValidateMACAddress(t *testing.T) { - if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) - } - - if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") - } - - if _, err := ValidateMACAddress(`random invalid string`); err == nil { - t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") - } -} - -func 
TestListOptsWithoutValidator(t *testing.T) { - o := NewListOpts(nil) - o.Set("foo") - if o.String() != "[foo]" { - t.Errorf("%s != [foo]", o.String()) - } - o.Set("bar") - if o.Len() != 2 { - t.Errorf("%d != 2", o.Len()) - } - o.Set("bar") - if o.Len() != 3 { - t.Errorf("%d != 3", o.Len()) - } - if !o.Get("bar") { - t.Error("o.Get(\"bar\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("foo") - if o.String() != "[bar bar]" { - t.Errorf("%s != [bar bar]", o.String()) - } - listOpts := o.GetAll() - if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { - t.Errorf("Expected [[bar bar]], got [%v]", listOpts) - } - mapListOpts := o.GetMap() - if len(mapListOpts) != 1 { - t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) - } - -} - -func TestListOptsWithValidator(t *testing.T) { - // Re-using logOptsvalidator (used by MapOpts) - o := NewListOpts(logOptsValidator) - o.Set("foo") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("foo=bar") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("max-file=2") - if o.Len() != 1 { - t.Errorf("%d != 1", o.Len()) - } - if !o.Get("max-file=2") { - t.Error("o.Get(\"max-file=2\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("max-file=2") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } -} - -func TestValidateDNSSearch(t *testing.T) { - valid := []string{ - `.`, - `a`, - `a.`, - `1.foo`, - `17.foo`, - `foo.bar`, - `foo.bar.baz`, - `foo.bar.`, - `foo.bar.baz`, - `foo1.bar2`, - `foo1.bar2.baz`, - `1foo.2bar.`, - `1foo.2bar.baz`, - `foo-1.bar-2`, - `foo-1.bar-2.baz`, - `foo-1.bar-2.`, - `foo-1.bar-2.baz`, - `1-foo.2-bar`, - `1-foo.2-bar.baz`, - `1-foo.2-bar.`, - `1-foo.2-bar.baz`, - } - - invalid := []string{ - ``, - ` `, - ` `, - `17`, - `17.`, - `.17`, - `17-.`, - `17-.foo`, - `.foo`, - `foo-.bar`, - `-foo.bar`, - `foo.bar-`, - `foo.bar-.baz`, - `foo.-bar`, - `foo.-bar.baz`, - `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, - } - - for _, domain := range valid { - if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } - - for _, domain := range invalid { - if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } -} - -func TestValidateExtraHosts(t *testing.T) { - valid := []string{ - `myhost:192.168.0.1`, - `thathost:10.0.2.1`, - `anipv6host:2003:ab34:e::1`, - `ipv6local:::1`, - } - - invalid := map[string]string{ - `myhost:192.notanipaddress.1`: `invalid IP`, - `thathost-nosemicolon10.0.0.1`: `bad format`, - `anipv6host:::::1`: `invalid IP`, - `ipv6local:::0::`: `invalid IP`, - } - - for _, extrahost := range valid { - if _, err := ValidateExtraHost(extrahost); err != nil { - t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) - } - } - - for extraHost, expectedError := range invalid { - if _, err := ValidateExtraHost(extraHost); err == nil { - t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) - } - } - } -} - 
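These tests all follow the same table-driven shape: a slice of inputs that must pass, and a map from bad input to the error substring it must produce. A minimal skeleton of that shape, with a hypothetical validateSomething standing in for any of the validators in this package:

    package opts_test

    import (
        "fmt"
        "strings"
        "testing"
    )

    // validateSomething is a stand-in for any validator above; it only
    // exists to show the test shape.
    func validateSomething(s string) (string, error) {
        if s == "" {
            return "", fmt.Errorf("empty value not allowed")
        }
        return s, nil
    }

    func TestValidatorTableDriven(t *testing.T) {
        valid := []string{"a", "foo.bar"}
        invalid := map[string]string{ // input -> expected error substring
            "": "empty value",
        }

        for _, in := range valid {
            if _, err := validateSomething(in); err != nil {
                t.Fatalf("validateSomething(%q) should succeed: %v", in, err)
            }
        }
        for in, want := range invalid {
            _, err := validateSomething(in)
            if err == nil || !strings.Contains(err.Error(), want) {
                t.Fatalf("validateSomething(%q): want error containing %q, got %v", in, want, err)
            }
        }
    }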
-func TestValidateAttach(t *testing.T) { - valid := []string{ - "stdin", - "stdout", - "stderr", - "STDIN", - "STDOUT", - "STDERR", - } - if _, err := ValidateAttach("invalid"); err == nil { - t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") - } - - for _, attach := range valid { - value, err := ValidateAttach(attach) - if err != nil { - t.Fatal(err) - } - if value != strings.ToLower(attach) { - t.Fatalf("Expected [%v], got [%v]", attach, value) - } - } -} - -func TestValidateLink(t *testing.T) { - valid := []string{ - "name", - "dcdfbe62ecd0:alias", - "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", - "angry_torvalds:linus", - } - invalid := map[string]string{ - "": "empty string specified for links", - "too:much:of:it": "bad format for links: too:much:of:it", - } - - for _, link := range valid { - if _, err := ValidateLink(link); err != nil { - t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) - } - } - - for link, expectedError := range invalid { - if _, err := ValidateLink(link); err == nil { - t.Fatalf("ValidateLink(`%q`) should have failed validation", link) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) - } - } - } -} - -func TestValidateDevice(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:r", - "/hostPath:/containerPath:rw", - "/hostPath:/containerPath:mrw", - } - invalid := map[string]string{ - "": "bad format for path: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for path: :", - "/tmp:": " is not an absolute path", - ":test": "bad format for path: :test", - ":/test": "bad format for path: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for path: :test:", - "::": "bad format for path: ::", - ":::": "bad format for path: :::", - "/tmp:::": "bad format for path: /tmp:::", - ":/tmp::": "bad format for path: :/tmp::", - "path:ro": "ro is not an absolute path", - "path:rr": "rr is not an absolute path", - "a:/b:ro": "bad mode specified: ro", - "a:/b:rr": "bad mode specified: rr", - } - - for _, path := range valid { - if _, err := ValidateDevice(path); err != nil { - t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidateDevice(path); err == nil { - t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} - -func TestValidateEnv(t *testing.T) { - valids := map[string]string{ - "a": "a", - "something": "something", - "_=a": "_=a", - "env1=value1": "env1=value1", - "_env1=value1": "_env1=value1", - "env2=value2=value3": "env2=value2=value3", - "env3=abc!qwe": "env3=abc!qwe", - "env_4=value 4": "env_4=value 4", - "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - "PATH=something": "PATH=something", - "asd!qwe": "asd!qwe", - "1asd": "1asd", - "123": "123", - "some space": "some space", - " some space before": " some space before", - "some space after ": "some space after ", - } - for value, expected := range valids { - actual, err := ValidateEnv(value) - if 
err != nil { - t.Fatal(err) - } - if actual != expected { - t.Fatalf("Expected [%v], got [%v]", expected, actual) - } - } -} - -func TestValidateLabel(t *testing.T) { - if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { - t.Fatalf("Expected an error [bad attribute format: label], go %v", err) - } - if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { - t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) - } - // Validate it's working with more than one = - if actual, err := ValidateLabel("key1=value1=value2"); err != nil { - t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) - } - // Validate it's working with one more - if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { - t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) - } -} - -func TestParseHost(t *testing.T) { - invalid := map[string]string{ - "anything": "Invalid bind address format: anything", - "something with spaces": "Invalid bind address format: something with spaces", - "://": "Invalid bind address format: ://", - "unknown://": "Invalid bind address format: unknown://", - "tcp://:port": "Invalid bind address format: :port", - "tcp://invalid": "Invalid bind address format: invalid", - "tcp://invalid:port": "Invalid bind address format: invalid:port", - } - const defaultHTTPHost = "tcp://127.0.0.1:2375" - var defaultHOST = "unix:///var/run/docker.sock" - - if runtime.GOOS == "windows" { - defaultHOST = defaultHTTPHost - } - valid := map[string]string{ - "": defaultHOST, - "fd://": "fd://", - "fd://something": "fd://something", - "tcp://host:": "tcp://host:2375", - "tcp://": "tcp://localhost:2375", - "tcp://:2375": "tcp://localhost:2375", // default ip address - "tcp://:2376": "tcp://localhost:2376", // default ip address - "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", - "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", - "tcp://192.168:8080": "tcp://192.168:8080", - "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P - "tcp://docker.com:2375": "tcp://docker.com:2375", - "unix://": "unix:///var/run/docker.sock", // default unix:// value - "unix://path/to/socket": "unix://path/to/socket", - } - - for value, errorMessage := range invalid { - if _, err := ParseHost(defaultHTTPHost, value); err == nil || err.Error() != errorMessage { - t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) - } - } - for value, expected := range valid { - if actual, err := ParseHost(defaultHTTPHost, value); err != nil || actual != expected { - t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) - } - } -} - -func logOptsValidator(val string) (string, error) { - allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} - vals := strings.Split(val, "=") - if allowedKeys[vals[0]] != "" { - return val, nil - } - return "", fmt.Errorf("invalid key %s", vals[0]) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go deleted file mode 100644 index 7cd48079..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go +++ /dev/null @@ -1,52 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { 
- values *map[string]*ulimit.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*ulimit.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := ulimit.Parse(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. -func (o *UlimitOpt) GetList() []*ulimit.Ulimit { - var ulimits []*ulimit.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go deleted file mode 100644 index ad284e75..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" -) - -func TestUlimitOpt(t *testing.T) { - ulimitMap := map[string]*ulimit.Ulimit{ - "nofile": {"nofile", 1024, 512}, - } - - ulimitOpt := NewUlimitOpt(&ulimitMap) - - expected := "[nofile=512:1024]" - if ulimitOpt.String() != expected { - t.Fatalf("Expected %v, got %v", expected, ulimitOpt) - } - - // Valid ulimit append to opts - if err := ulimitOpt.Set("core=1024:1024"); err != nil { - t.Fatal(err) - } - - // Invalid ulimit type returns an error and do not append to opts - if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { - t.Fatalf("Expected error on invalid ulimit type") - } - expected = "[nofile=512:1024 core=1024:1024]" - expected2 := "[core=1024:1024 nofile=512:1024]" - result := ulimitOpt.String() - if result != expected && result != expected2 { - t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) - } - - // And test GetList - ulimits := ulimitOpt.GetList() - if len(ulimits) != 2 { - t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d969..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. 
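UlimitOpt above stores parsed limits in a map keyed by resource name, so repeated flags for the same resource overwrite rather than accumulate (which is also why its test accepts either ordering from String). A simplified standalone sketch of that idea; rlimit and parseRlimit are assumed stand-ins, not the vendored ulimit API:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // rlimit is a simplified stand-in for the vendored ulimit type.
    type rlimit struct {
        Name       string
        Soft, Hard int64
    }

    func parseRlimit(s string) (*rlimit, error) {
        parts := strings.SplitN(s, "=", 2)
        if len(parts) != 2 {
            return nil, fmt.Errorf("bad ulimit %q", s)
        }
        vals := strings.SplitN(parts[1], ":", 2)
        soft, err := strconv.ParseInt(vals[0], 10, 64)
        if err != nil {
            return nil, err
        }
        hard := soft // a single number sets both limits
        if len(vals) == 2 {
            if hard, err = strconv.ParseInt(vals[1], 10, 64); err != nil {
                return nil, err
            }
        }
        return &rlimit{parts[0], soft, hard}, nil
    }

    func main() {
        // Keying the map by name is what makes a later "nofile=..." override
        // an earlier one -- the same behavior UlimitOpt.Set relies on above.
        values := map[string]*rlimit{}
        for _, arg := range []string{"nofile=512:1024", "nofile=2048:4096"} {
            l, err := parseRlimit(arg)
            if err != nil {
                panic(err)
            }
            values[l.Name] = l
        }
        fmt.Println(values["nofile"]) // &{nofile 2048 4096}
    }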
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index fb3327f1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1016 +0,0 @@ -package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -type ( - // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. - Archive io.ReadCloser - // Reader is a type of io.Reader. - Reader io.Reader - // Compression is the state represtents if compressed or not. - Compression int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions - IncludeSourceDir bool - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error -) - -var ( - // ErrNotImplemented is the error message of function not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -// IsArchive checks if it is a archive by the header. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. 
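DetectCompression, defined just below, is plain magic-byte sniffing: compare the first few bytes of the stream against known signatures (bzip2 "BZh", gzip 1F 8B 08, xz FD 37 7A 58 5A 00). A standalone sketch of the technique:

    package main

    import (
        "bytes"
        "fmt"
    )

    // detect sniffs a compression format from leading magic bytes, the
    // same technique as the deleted DetectCompression.
    func detect(header []byte) string {
        magics := map[string][]byte{
            "bzip2": {0x42, 0x5A, 0x68},
            "gzip":  {0x1F, 0x8B, 0x08},
            "xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
        }
        for name, m := range magics {
            if len(header) >= len(m) && bytes.Equal(header[:len(m)], m) {
                return name
            }
        }
        return "uncompressed"
    }

    func main() {
        fmt.Println(detect([]byte{0x1F, 0x8B, 0x08, 0x00})) // gzip
    }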
-func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debugf("Len too short") - continue - } - if bytes.Compare(m, source[:len(m)]) == 0 { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.Command(args[0], args[1:]...), archive) -} - -// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil { - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, chdone, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { - <-chdone - return readBufWrapper.Close() - }), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. 
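DecompressStream above pairs that sniffing with bufio's Peek, which inspects bytes without consuming them, so the decompressor it then layers on top still sees the complete stream. A gzip-only sketch of that trick (the vendored code also handles bzip2 and xz):

    package main

    import (
        "bufio"
        "compress/gzip"
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    // decompressIfGzip peeks at a buffered reader so the sniffed bytes
    // are not consumed, then wraps that same reader if gzip is detected.
    func decompressIfGzip(r io.Reader) (io.Reader, error) {
        br := bufio.NewReader(r)
        hdr, err := br.Peek(3)
        if err != nil && err != io.EOF {
            return nil, err
        }
        if len(hdr) == 3 && hdr[0] == 0x1F && hdr[1] == 0x8B && hdr[2] == 0x08 {
            return gzip.NewReader(br) // reads the magic bytes back off br
        }
        return br, nil
    }

    func main() {
        out, err := decompressIfGzip(strings.NewReader("plain text"))
        if err != nil {
            panic(err)
        }
        b, _ := ioutil.ReadAll(out)
        fmt.Println(string(b))
    }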
-func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hardlinked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files - if ta.UIDMaps != nil || ta.GIDMaps != nil { - uid, gid, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) - if err != nil { - return err - } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - hdr.Gid = xGID - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg { - file, err := os.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. 
- // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debugf("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - return err - } - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. 
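A hedged usage sketch of this API ahead of its implementation below; the import path is the vendored one from this diff, and the paths in main are placeholders:

    package main

    import (
        "io"
        "os"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Tar up /etc, skipping anything matching "ssl", gzip-compressed.
        rc, err := archive.TarWithOptions("/etc", &archive.TarOptions{
            Compression:     archive.Gzip,
            ExcludePatterns: []string{"ssl"},
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close()

        // TarWithOptions returns the read end of an io.Pipe, so the
        // tarball is produced concurrently while we consume it here.
        out, err := os.Create("/tmp/etc.tgz")
        if err != nil {
            panic(err)
        }
        defer out.Close()
        io.Copy(out, rc)
    }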
-func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - } - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Debugf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Debugf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. 
IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) - if err != nil { - logrus.Debugf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - if !exceptions && f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", filePath, err) - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0777) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. 
- return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." { - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if hdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if hdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. 
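The Archiver type makes Untar pluggable precisely so wrappers (for example, ID-remapping unpackers) can reuse TarUntar, CopyWithTar and friends unchanged. A sketch of that extension point, using only names defined in this package:

    package main

    import (
        "fmt"
        "io"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
    )

    func main() {
        // An Archiver whose Untar just logs and delegates; ID-remapping
        // untar implementations plug in the same way.
        a := &archive.Archiver{
            Untar: func(r io.Reader, dest string, opts *archive.TarOptions) error {
                fmt.Println("unpacking into", dest)
                return archive.Untar(r, dest, opts)
            },
        }
        if err := a.TarUntar("/srv/app", "/srv/app-copy"); err != nil {
            panic(err)
        }
    }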
-func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - return archiver.Untar(archive, dst, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - if err := archiver.Untar(archive, dst, options); err != nil { - return err - } - return nil -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := system.MkdirAll(dst, 0755); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. 
- if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { - return err - } - - r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } - - // only perform mapping if the file being copied isn't already owned by the - // uid or gid of the remapped root in the container - if remappedRootUID != hdr.Uid { - xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if remappedRootGID != hdr.Gid { - xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) - defer func() { - if er := <-errC; err != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating specific manner depending -// where the daemon is running. If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { - chdone := make(chan struct{}) - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(chdone) - }() - - return pipeR, chdone, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src Archive, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. 
The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go deleted file mode 100644 index 3e277281..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go +++ /dev/null @@ -1,1204 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - "testing" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func TestIsArchiveNilHeader(t *testing.T) { - out := IsArchive(nil) - if out { - t.Fatalf("isArchive should return false as nil is not a valid archive header") - } -} - -func TestIsArchiveInvalidHeader(t *testing.T) { - header := []byte{0x00, 0x01, 0x02} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is not a valid archive header", header) - } -} - -func TestIsArchiveBzip2(t *testing.T) { - header := []byte{0x42, 0x5A, 0x68} - out := IsArchive(header) - if !out { - t.Fatalf("isArchive should return true as %s is a bz2 header", header) - } -} - -func TestIsArchive7zip(t *testing.T) { - header := []byte{0x50, 0x4b, 0x03, 0x04} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) - } -} - -func TestDecompressStreamGzip(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.gz") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a gzip file.") - } -} - -func TestDecompressStreamBzip2(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.bz2") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a bzip2 file.") - } -} - -func TestDecompressStreamXz(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.xz") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a xz file.") - } -} - -func 
TestCompressStreamXzUnsuported(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, Xz) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestCompressStreamBzip2Unsupported(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, Xz) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestCompressStreamInvalid(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, -1) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestExtensionInvalid(t *testing.T) { - compression := Compression(-1) - output := compression.Extension() - if output != "" { - t.Fatalf("The extension of an invalid compression should be an empty string.") - } -} - -func TestExtensionUncompressed(t *testing.T) { - compression := Uncompressed - output := compression.Extension() - if output != "tar" { - t.Fatalf("The extension of a uncompressed archive should be 'tar'.") - } -} -func TestExtensionBzip2(t *testing.T) { - compression := Bzip2 - output := compression.Extension() - if output != "tar.bz2" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") - } -} -func TestExtensionGzip(t *testing.T) { - compression := Gzip - output := compression.Extension() - if output != "tar.gz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") - } -} -func TestExtensionXz(t *testing.T) { - compression := Xz - output := compression.Extension() - if output != "tar.xz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") - } -} - -func TestCmdStreamLargeStderr(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, _, err := cmdStream(cmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - errCh := make(chan error) - go func() { - _, err := io.Copy(ioutil.Discard, out) - errCh <- err - }() - select { - case err := <-errCh: - if err != nil { - t.Fatalf("Command should not have failed (err=%.100s...)", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("Command did not complete in 5 seconds; probable deadlock") - } -} - -func TestCmdStreamBad(t *testing.T) { - badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, _, err := cmdStream(badCmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - if output, err := ioutil.ReadAll(out); err == nil { - t.Fatalf("Command should have failed") - } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { - t.Fatalf("Wrong error value (%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestCmdStreamGood(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") - out, _, err := cmdStream(cmd, nil) - if err != nil { - t.Fatal(err) - } - if output, err := ioutil.ReadAll(out); err != nil { - t.Fatalf("Command should not have failed (err=%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} 
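The deleted tests above pin down magic-byte dispatch: the bzip2 header 0x42 0x5A 0x68 appears verbatim, and 7z headers are rejected as unsupported. A minimal sketch of that style of header sniffing follows — the table, function name, and string return values here are ours, not the package's DetectCompression API.

package main

import (
	"bytes"
	"fmt"
)

// detectCompression re-sketches the magic-byte dispatch the deleted tests
// above exercise. The table and return values are ours; the real package
// exposes this as DetectCompression with its own Compression type.
func detectCompression(source []byte) string {
	for name, magic := range map[string][]byte{
		"bzip2": {0x42, 0x5A, 0x68},             // "BZh"
		"gzip":  {0x1F, 0x8B, 0x08},             // gzip header, deflate method
		"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A}, // 0xFD "7zXZ"
	} {
		if len(source) >= len(magic) && bytes.Equal(magic, source[:len(magic)]) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	fmt.Println(detectCompression([]byte{0x42, 0x5A, 0x68, 0x39})) // bzip2
	fmt.Println(detectCompression([]byte("plain tar data")))       // uncompressed
}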
- -func TestUntarPathWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - invalidDestFolder := path.Join(tempFolder, "invalidDest") - // Create a src file - srcFile := path.Join(tempFolder, "src") - _, err = os.Create(srcFile) - if err != nil { - t.Fatalf("Fail to create the source file") - } - err = UntarPath(srcFile, invalidDestFolder) - if err == nil { - t.Fatalf("UntarPath with invalid destination path should throw an error.") - } -} - -func TestUntarPathWithInvalidSrc(t *testing.T) { - dest, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - defer os.RemoveAll(dest) - err = UntarPath("/invalid/path", dest) - if err == nil { - t.Fatalf("UntarPath with invalid src path should throw an error.") - } -} - -func TestUntarPath(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(path.Join(tmpFolder, "src")) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFolder := path.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - err = UntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath shouldn't throw an error, %s.", err) - } - expectedFile := path.Join(destFolder, srcFile) - _, err = os.Stat(expectedFile) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -// Do the same test as above but with the destination as file, it should fail -func TestUntarPathWithDestinationFile(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(path.Join(tmpFolder, "src")) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFile := path.Join(tmpFolder, "dest") - _, err = os.Create(destFile) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - err = UntarPath(tarFile, destFile) - if err == nil { - t.Fatalf("UntarPath should throw an error if the destination if a file") - } -} - -// Do the same test as above but with the destination folder already exists -// and the destination file is a directory -// It's working, see https://github.com/docker/docker/issues/10040 -func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(srcFile) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFolder := path.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination folder") - } - // Let's create a folder that will has the same path as the extracted file (from tar) - destSrcFileAsFolder := path.Join(destFolder, srcFile) - err = 
os.MkdirAll(destSrcFileAsFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = UntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") - } -} - -func TestCopyWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - destFolder := path.Join(tempFolder, "dest") - invalidSrc := path.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyWithTar(invalidSrc, destFolder) - if err == nil { - t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - srcFolder := path.Join(tempFolder, "src") - inexistentDestFolder := path.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyWithTar(srcFolder, inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } -} - -// Test CopyWithTar with a file as src -func TestCopyWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - srcFolder := path.Join(folder, "src") - src := path.Join(folder, path.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content - if err != nil { - t.Fatalf("Destination file should be the same as the source.") - } -} - -// Test CopyWithTar with a folder as src -func TestCopyWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - src := path.Join(folder, path.Join("src", "folder")) - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content (the file inside) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestCopyFileWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - destFolder := path.Join(tempFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - invalidFile := path.Join(tempFolder, "doesnotexists") - err = CopyFileWithTar(invalidFile, destFolder) - if err == nil { - t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { - 
tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - defer os.RemoveAll(tempFolder) - srcFile := path.Join(tempFolder, "src") - inexistentDestFolder := path.Join(tempFolder, "doesnotexists") - _, err = os.Create(srcFile) - if err != nil { - t.Fatal(err) - } - err = CopyFileWithTar(srcFile, inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } - // FIXME Test the src file and content -} - -func TestCopyFileWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - src := path.Join(folder, "srcfolder") - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyFileWithTar(src, dest) - if err == nil { - t.Fatalf("CopyFileWithTar should throw an error with a folder.") - } -} - -func TestCopyFileWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - srcFolder := path.Join(folder, "src") - src := path.Join(folder, path.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest+"/") - if err != nil { - t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestTarFiles(t *testing.T) { - // try without hardlinks - if err := checkNoChanges(1000, false); err != nil { - t.Fatal(err) - } - // try with hardlinks - if err := checkNoChanges(1000, true); err != nil { - t.Fatal(err) - } -} - -func checkNoChanges(fileNum int, hardlinks bool) error { - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - return err - } - defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - return err - } - defer os.RemoveAll(destDir) - - _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) - if err != nil { - return err - } - - err = TarUntar(srcDir, destDir) - if err != nil { - return err - } - - changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - return err - } - if len(changes) > 0 { - return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) - } - return nil -} - -func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { - archive, err := TarWithOptions(origin, options) - if err != nil { - t.Fatal(err) - } - defer archive.Close() - - buf := make([]byte, 10) - if _, err := archive.Read(buf); err != nil { - return nil, err - } - wrap := io.MultiReader(bytes.NewReader(buf), archive) - - detectedCompression := DetectCompression(buf) - compression := options.Compression - if detectedCompression.Extension() != compression.Extension() { - return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) - } - - tmp, err := ioutil.TempDir("", "docker-test-untar") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmp) - if err := Untar(wrap, tmp, nil); err != nil { - return nil, err - } - if _, err := os.Stat(tmp); err != nil { - return nil, err - } - - return ChangesDirs(origin, tmp) -} - -func TestTarUntar(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - } -} - -func TestTarUntarWithXattr(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability") - if capability == nil && capability[0] != 0x00 { - t.Fatalf("Untar should have kept the 'security.capability' xattr.") - } - } -} - -func TestTarWithOptions(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - if _, err := ioutil.TempDir(origin, "folder"); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - - cases := []struct { - opts *TarOptions - numChanges int - }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 2}, - {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, - {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, - } - for _, testCase := range cases { - changes, err := tarUntar(t, origin, testCase.opts) - if err != nil { - t.Fatalf("Error 
tar/untar when testing inclusion/exclusion: %s", err) - } - if len(changes) != testCase.numChanges { - t.Errorf("Expected %d changes, got %d for %+v:", - testCase.numChanges, len(changes), testCase.opts) - } - } -} - -// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz -// use PAX Global Extended Headers. -// Failing prevents the archives from being uncompressed during ADD -func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { - hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) - if err != nil { - t.Fatal(err) - } -} - -// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. -// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. -func TestUntarUstarGnuConflict(t *testing.T) { - f, err := os.Open("testdata/broken.tar") - if err != nil { - t.Fatal(err) - } - found := false - tr := tar.NewReader(f) - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - t.Fatal(err) - } - if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { - found = true - break - } - } - if !found { - t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") - } -} - -func TestTarWithBlockCharFifo(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } - - changes, err := ChangesDirs(origin, dest) - if err != nil { - t.Fatal(err) - } - if len(changes) > 0 { - t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) - } -} - -func TestTarWithHardLink(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { - t.Fatal(err) - } - - 
var i1, i2 uint64 - if i1, err = getNlink(path.Join(origin, "1")); err != nil { - t.Fatal(err) - } - // sanity check that we can hardlink - if i1 != 2 { - t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } - - if i1, err = getInode(path.Join(dest, "1")); err != nil { - t.Fatal(err) - } - if i2, err = getInode(path.Join(dest, "2")); err != nil { - t.Fatal(err) - } - - if i1 != i2 { - t.Errorf("expected matching inodes, but got %d and %d", i1, i2) - } -} - -func getNlink(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - // We need this conversion on ARM64 - return uint64(statT.Nlink), nil -} - -func getInode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - return statT.Ino, nil -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} - -func BenchmarkTarUntar(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, false) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func BenchmarkTarUntarWithLinks(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, true) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func TestUntarInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: 
"../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarHardlinkToSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "symlink1", - Typeflag: tar.TypeSymlink, - Linkname: "regfile", - Mode: 0644, - }, - { - Name: "symlink2", - Typeflag: tar.TypeLink, - Linkname: "symlink1", - Mode: 0644, - }, - { - Name: "regfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestUntarInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try writing to victim/newdir/newfile with a symlink in the path - { - // this header needs to be before the next one, or else there is an error - Name: "dir/loophole", - Typeflag: tar.TypeSymlink, - Linkname: "../../victim", - Mode: 0755, - }, - { - Name: "dir/loophole/newdir/newfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestTempArchiveCloseMultipleTimes(t *testing.T) { - reader := ioutil.NopCloser(strings.NewReader("hello")) - tempArchive, err := NewTempArchive(reader, "") - buf := make([]byte, 10) - n, err := tempArchive.Read(buf) - if n != 5 { - t.Fatalf("Expected to read 5 bytes. Read %d instead", n) - } - for i := 0; i < 3; i++ { - if err = tempArchive.Close(); err != nil { - t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index 07693e37..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,112 +0,0 @@ -// +build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a seperate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. 
-func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. - -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - inode = uint64(s.Ino) - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) - } - - return -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") - } - return int(s.Uid), int(s.Gid), nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go deleted file mode 100644 index 18f45c48..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package archive - -import ( - "os" - "testing" -) - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct{ in, expected string }{ - {"foo", "foo"}, - {"foo/bar", "foo/bar"}, - {"foo/dir/", "foo/dir/"}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {"foo/bar", false, "foo/bar"}, - {"foo/bar", true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0000}, - {0777, 0777}, - {0644, 0644}, - {0755, 0755}, - {0444, 0444}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index fbabc03a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build windows - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a seperate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - // no notion of file ownership mapping yet on Windows - return 0, 0, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go deleted file mode 100644 index b7abc402..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build windows - -package archive - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestCopyFileWithInvalidDest(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := "c:dest" - srcFolder := filepath.Join(folder, "src") - src := filepath.Join(folder, "src", "src") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err == nil { - t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") - } -} - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct { - in, expected string - shouldFail bool - }{ - {"foo", "foo", false}, - {"foo/bar", "___", true}, // unix-styled windows path must fail - {`foo\bar`, "foo/bar", false}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if v.shouldFail && err == nil { - t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) - } else if !v.shouldFail && out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {`foo\bar`, false, "foo/bar"}, - {`foo\bar`, true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0111}, - {0777, 0755}, - {0644, 0755}, - {0755, 0755}, - {0444, 0555}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index 12ec4016..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,413 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. 
- path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - // Skip AUFS metadata - if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched { - return err - } - - change := Change{ - Path: path, - } - - // Find out what kind of modification happened - file := filepath.Base(path) - // If there is a whiteout, then the file was removed - if strings.HasPrefix(file, WhiteoutPrefix) { - originalFile := file[len(WhiteoutPrefix):] - change.Path = filepath.Join(filepath.Dir(path), originalFile) - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directoriy in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. 
- return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild, _ := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. 
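Earlier in this deleted changes.go, the Changes walk keys deletions off the WhiteoutPrefix marker. A compact sketch of that classification rule — the ".wh." value is the usual AUFS marker and is assumed here, since the constant itself is defined outside this hunk, and the helper name is ours:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// whiteoutPrefix assumes the AUFS-standard ".wh." marker; in the real
// package the constant is defined elsewhere, outside this hunk.
const whiteoutPrefix = ".wh."

// changeFromName is an illustrative reduction of the classification in the
// deleted Changes walk: a whiteout basename records a deletion of the
// unprefixed file, anything else is an addition or modification.
func changeFromName(path string) (target, kind string) {
	base := filepath.Base(path)
	if strings.HasPrefix(base, whiteoutPrefix) {
		return filepath.Join(filepath.Dir(path), base[len(whiteoutPrefix):]), "delete"
	}
	return path, "add-or-modify"
}

func main() {
	fmt.Println(changeFromName("/etc/.wh.hosts")) // /etc/hosts delete
	fmt.Println(changeFromName("/etc/hosts"))     // /etc/hosts add-or-modify
}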
-func ChangesDirs(newDir, oldDir string) ([]Change, error) {
-	var (
-		oldRoot, newRoot *FileInfo
-	)
-	if oldDir == "" {
-		emptyDir, err := ioutil.TempDir("", "empty")
-		if err != nil {
-			return nil, err
-		}
-		defer os.Remove(emptyDir)
-		oldDir = emptyDir
-	}
-	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
-	if err != nil {
-		return nil, err
-	}
-
-	return newRoot.Changes(oldRoot), nil
-}
-
-// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
-func ChangesSize(newDir string, changes []Change) int64 {
-	var (
-		size int64
-		sf   = make(map[uint64]struct{})
-	)
-	for _, change := range changes {
-		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
-			file := filepath.Join(newDir, change.Path)
-			fileInfo, err := os.Lstat(file)
-			if err != nil {
-				logrus.Errorf("Cannot stat %q: %s", file, err)
-				continue
-			}
-
-			if fileInfo != nil && !fileInfo.IsDir() {
-				if hasHardlinks(fileInfo) {
-					inode := getIno(fileInfo)
-					if _, ok := sf[inode]; !ok {
-						size += fileInfo.Size()
-						sf[inode] = struct{}{}
-					}
-				} else {
-					size += fileInfo.Size()
-				}
-			}
-		}
-	}
-	return size
-}
-
-// ExportChanges produces an Archive from the provided changes, relative to dir.
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
-	reader, writer := io.Pipe()
-	go func() {
-		ta := &tarAppender{
-			TarWriter: tar.NewWriter(writer),
-			Buffer:    pools.BufioWriter32KPool.Get(nil),
-			SeenFiles: make(map[uint64]string),
-			UIDMaps:   uidMaps,
-			GIDMaps:   gidMaps,
-		}
-		// this buffer is needed for the duration of this piped stream
-		defer pools.BufioWriter32KPool.Put(ta.Buffer)
-
-		sort.Sort(changesByPath(changes))
-
-		// In general we log errors here but ignore them because
-		// during e.g. a diff operation the container can continue
-		// mutating the filesystem and we can see transient errors
-		// from this
-		for _, change := range changes {
-			if change.Kind == ChangeDelete {
-				whiteOutDir := filepath.Dir(change.Path)
-				whiteOutBase := filepath.Base(change.Path)
-				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
-				timestamp := time.Now()
-				hdr := &tar.Header{
-					Name:       whiteOut[1:],
-					Size:       0,
-					ModTime:    timestamp,
-					AccessTime: timestamp,
-					ChangeTime: timestamp,
-				}
-				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
-					logrus.Debugf("Can't write whiteout header: %s", err)
-				}
-			} else {
-				path := filepath.Join(dir, change.Path)
-				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
-					logrus.Debugf("Can't add file %s to tar: %s", path, err)
-				}
-			}
-		}
-
-		// Make sure to check the error on Close.
-		if err := ta.TarWriter.Close(); err != nil {
-			logrus.Debugf("Can't close layer: %s", err)
-		}
-		if err := writer.Close(); err != nil {
-			logrus.Debugf("failed to close Changes writer: %s", err)
-		}
-	}()
-	return reader, nil
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
deleted file mode 100644
index 378cc09c..00000000
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package archive
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-	"syscall"
-	"unsafe"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-// walker is used to implement collectFileInfoForChanges on linux. 
Where this
-// method in general returns the entire contents of two directory trees, we
-// optimize some FS calls out on linux. In particular, we take advantage of the
-// fact that getdents(2) returns the inode of each file in the directory being
-// walked, which, when walking two trees in parallel to generate a list of
-// changes, can be used to prune subtrees without ever having to lstat(2) them
-// directly. Eliminating stat calls in this way can save several seconds on
-// large images.
-type walker struct {
-	dir1  string
-	dir2  string
-	root1 *FileInfo
-	root2 *FileInfo
-}
-
-// collectFileInfoForChanges returns a complete representation of the trees
-// rooted at dir1 and dir2, with one important exception: any subtree or
-// leaf where the inode and device numbers are an exact match between dir1
-// and dir2 will be pruned from the results. This method is *only* to be used
-// for generating a list of changes between the two directories, as it does not
-// reflect the full contents.
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
-	w := &walker{
-		dir1:  dir1,
-		dir2:  dir2,
-		root1: newRootFileInfo(),
-		root2: newRootFileInfo(),
-	}
-
-	i1, err := os.Lstat(w.dir1)
-	if err != nil {
-		return nil, nil, err
-	}
-	i2, err := os.Lstat(w.dir2)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	if err := w.walk("/", i1, i2); err != nil {
-		return nil, nil, err
-	}
-
-	return w.root1, w.root2, nil
-}
-
-// Given a FileInfo, its path info, and a reference to the root of the tree
-// being constructed, register this file with the tree.
-func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
-	if fi == nil {
-		return nil
-	}
-	parent := root.LookUp(filepath.Dir(path))
-	if parent == nil {
-		return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
-	}
-	info := &FileInfo{
-		name:     filepath.Base(path),
-		children: make(map[string]*FileInfo),
-		parent:   parent,
-	}
-	cpath := filepath.Join(dir, path)
-	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
-	if err != nil {
-		return err
-	}
-	info.stat = stat
-	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
-	parent.children[info.name] = info
-	return nil
-}
-
-// Walk a subtree rooted at the same path in both trees being iterated. For
-// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
-func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
-	// Register these nodes with the return trees, unless we're still at the
-	// (already-created) roots:
-	if path != "/" {
-		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
-			return err
-		}
-		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
-			return err
-		}
-	}
-
-	is1Dir := i1 != nil && i1.IsDir()
-	is2Dir := i2 != nil && i2.IsDir()
-
-	sameDevice := false
-	if i1 != nil && i2 != nil {
-		si1 := i1.Sys().(*syscall.Stat_t)
-		si2 := i2.Sys().(*syscall.Stat_t)
-		if si1.Dev == si2.Dev {
-			sameDevice = true
-		}
-	}
-
-	// If these files are both non-existent, or leaves (non-dirs), we are done.
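// Aside (not part of the vendored file): the early-pruning rule described in
// the walker comments above reduces to comparing (device, inode) pairs across
// the two trees. A minimal sketch, assuming Linux stat semantics; the helper
// name samePruneKey is hypothetical:
//
//	func samePruneKey(si1, si2 *syscall.Stat_t) bool {
//		// The same inode on the same device means both directory entries
//		// refer to one shared filesystem object (e.g. a hard link), so the
//		// subtree cannot differ and may be skipped without lstat(2)-ing
//		// its contents.
//		return si1 != nil && si2 != nil && si1.Dev == si2.Dev && si1.Ino == si2.Ino
//	}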
- if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. - var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. 
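// Note (an explanatory aside, not from the vendored file): the loop below
// alternates two phases. Refill: syscall.ReadDirent — getdents(2) on Linux —
// packs as many raw dirent records as fit into buf and returns the byte count.
// Drain: parseDirent decodes records from buf[bufp:nbuf] until the buffer is
// consumed. The cycle repeats until ReadDirent returns no bytes (end of
// directory), after which the collected {name, inode} pairs are sorted.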
- for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of syscall.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." { // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index 35832f08..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go deleted file mode 100644 index 5a3282b5..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "sort" - "testing" -) - -func TestHardLinkOrder(t *testing.T) { - names := []string{"file1.txt", "file2.txt", "file3.txt"} - msg := []byte("Hey y'all") - - // Create dir - src, err := ioutil.TempDir("", "docker-hardlink-test-src-") - if err != nil { - t.Fatal(err) - } - //defer os.RemoveAll(src) - for _, name := range names { - func() { - fh, err := os.Create(path.Join(src, name)) - if err != nil { - t.Fatal(err) - } - defer fh.Close() - if _, err = fh.Write(msg); err != nil { - t.Fatal(err) - } - }() - } - // Create dest, with changes that includes hardlinks - dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") - if err != nil { - t.Fatal(err) - } - os.RemoveAll(dest) // we just want the name, at first - if err := copyDir(src, dest); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - for _, name := range names { - for i := 0; i < 5; i++ { - if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { - t.Fatal(err) - } - } - } - - // get changes - changes, err := ChangesDirs(dest, src) - if err != nil { - t.Fatal(err) - } - - // sort - sort.Sort(changesByPath(changes)) - - // ExportChanges - ar, err := ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrs, err := walkHeaders(ar) - if err != nil { - t.Fatal(err) - } - - // reverse sort - sort.Sort(sort.Reverse(changesByPath(changes))) - // ExportChanges - arRev, err := ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrsRev, err := walkHeaders(arRev) - if err != nil { - t.Fatal(err) - } - - // line up the two sets - sort.Sort(tarHeaders(hdrs)) - sort.Sort(tarHeaders(hdrsRev)) - - // compare Size and LinkName - for i := range hdrs { - if hdrs[i].Name != hdrsRev[i].Name { - t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) - } - if hdrs[i].Size != hdrsRev[i].Size { - t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) - } - if hdrs[i].Typeflag != hdrsRev[i].Typeflag { - t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) - } - if hdrs[i].Linkname != hdrsRev[i].Linkname { - t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, 
hdrsRev[i].Linkname)
-		}
-	}
-
-}
-
-type tarHeaders []tar.Header
-
-func (th tarHeaders) Len() int           { return len(th) }
-func (th tarHeaders) Swap(i, j int)      { th[j], th[i] = th[i], th[j] }
-func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
-
-func walkHeaders(r io.Reader) ([]tar.Header, error) {
-	t := tar.NewReader(r)
-	headers := []tar.Header{}
-	for {
-		hdr, err := t.Next()
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			return headers, err
-		}
-		headers = append(headers, *hdr)
-	}
-	return headers, nil
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go
deleted file mode 100644
index 52daaa64..00000000
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go
+++ /dev/null
@@ -1,524 +0,0 @@
-package archive
-
-import (
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path"
-	"sort"
-	"testing"
-	"time"
-)
-
-func max(x, y int) int {
-	if x >= y {
-		return x
-	}
-	return y
-}
-
-func copyDir(src, dst string) error {
-	cmd := exec.Command("cp", "-a", src, dst)
-	if err := cmd.Run(); err != nil {
-		return err
-	}
-	return nil
-}
-
-type FileType uint32
-
-const (
-	Regular FileType = iota
-	Dir
-	Symlink
-)
-
-type FileData struct {
-	filetype    FileType
-	path        string
-	contents    string
-	permissions os.FileMode
-}
-
-func createSampleDir(t *testing.T, root string) {
-	files := []FileData{
-		{Regular, "file1", "file1\n", 0600},
-		{Regular, "file2", "file2\n", 0666},
-		{Regular, "file3", "file3\n", 0404},
-		{Regular, "file4", "file4\n", 0600},
-		{Regular, "file5", "file5\n", 0600},
-		{Regular, "file6", "file6\n", 0600},
-		{Regular, "file7", "file7\n", 0600},
-		{Dir, "dir1", "", 0740},
-		{Regular, "dir1/file1-1", "file1-1\n", 01444},
-		{Regular, "dir1/file1-2", "file1-2\n", 0666},
-		{Dir, "dir2", "", 0700},
-		{Regular, "dir2/file2-1", "file2-1\n", 0666},
-		{Regular, "dir2/file2-2", "file2-2\n", 0666},
-		{Dir, "dir3", "", 0700},
-		{Regular, "dir3/file3-1", "file3-1\n", 0666},
-		{Regular, "dir3/file3-2", "file3-2\n", 0666},
-		{Dir, "dir4", "", 0700},
-		{Regular, "dir4/file3-1", "file4-1\n", 0666},
-		{Regular, "dir4/file3-2", "file4-2\n", 0666},
-		{Symlink, "symlink1", "target1", 0666},
-		{Symlink, "symlink2", "target2", 0666},
-	}
-
-	now := time.Now()
-	for _, info := range files {
-		p := path.Join(root, info.path)
-		if info.filetype == Dir {
-			if err := os.MkdirAll(p, info.permissions); err != nil {
-				t.Fatal(err)
-			}
-		} else if info.filetype == Regular {
-			if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
-				t.Fatal(err)
-			}
-		} else if info.filetype == Symlink {
-			if err := os.Symlink(info.contents, p); err != nil {
-				t.Fatal(err)
-			}
-		}
-
-		if info.filetype != Symlink {
-			// Set a consistent ctime, atime for all files and dirs
-			if err := os.Chtimes(p, now, now); err != nil {
-				t.Fatal(err)
-			}
-		}
-	}
-}
-
-func TestChangeString(t *testing.T) {
-	modifyChange := Change{"change", ChangeModify}
-	toString := modifyChange.String()
-	if toString != "C change" {
-		t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString)
-	}
-	addChange := Change{"change", ChangeAdd}
-	toString = addChange.String()
-	if toString != "A change" {
-		t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString)
-	}
-	deleteChange := 
Change{"change", ChangeDelete} - toString = deleteChange.String() - if toString != "D change" { - t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) - } -} - -func TestChangesWithNoChanges(t *testing.T) { - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - if len(changes) != 0 { - t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) - } -} - -func TestChangesWithChanges(t *testing.T) { - // Mock the readonly layer - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) - - // Mock the RW layer - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - - // Create a folder in RW layer - dir1 := path.Join(rwLayer, "dir1") - os.MkdirAll(dir1, 0740) - deletedFile := path.Join(dir1, ".wh.file1-2") - ioutil.WriteFile(deletedFile, []byte{}, 0600) - modifiedFile := path.Join(dir1, "file1-1") - ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) - // Let's add a subfolder for a newFile - subfolder := path.Join(dir1, "subfolder") - os.MkdirAll(subfolder, 0740) - newFile := path.Join(subfolder, "newFile") - ioutil.WriteFile(newFile, []byte{}, 0740) - - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1", ChangeModify}, - {"/dir1/file1-1", ChangeModify}, - {"/dir1/file1-2", ChangeDelete}, - {"/dir1/subfolder", ChangeModify}, - {"/dir1/subfolder/newFile", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) -} - -// See https://github.com/docker/docker/pull/13590 -func TestChangesWithChangesGH13590(t *testing.T) { - baseLayer, err := ioutil.TempDir("", "docker-changes-test.") - defer os.RemoveAll(baseLayer) - - dir3 := path.Join(baseLayer, "dir1/dir2/dir3") - os.MkdirAll(dir3, 07400) - - file := path.Join(dir3, "file.txt") - ioutil.WriteFile(file, []byte("hello"), 0666) - - layer, err := ioutil.TempDir("", "docker-changes-test2.") - defer os.RemoveAll(layer) - - // Test creating a new file - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) - file = path.Join(layer, "dir1/dir2/dir3/file1.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err := Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1/dir2/dir3", ChangeModify}, - {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) - - // Now test changing a file - layer, err = ioutil.TempDir("", "docker-changes-test3.") - defer os.RemoveAll(layer) - - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - file = path.Join(layer, "dir1/dir2/dir3/file.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err = Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges = []Change{ - {"/dir1/dir2/dir3/file.txt", 
ChangeModify},
-	}
-	checkChanges(expectedChanges, changes, t)
-}
-
-// Create a directory, copy it, make sure we report no changes between the two
-func TestChangesDirsEmpty(t *testing.T) {
-	src, err := ioutil.TempDir("", "docker-changes-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(src)
-	createSampleDir(t, src)
-	dst := src + "-copy"
-	if err := copyDir(src, dst); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dst)
-	changes, err := ChangesDirs(dst, src)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(changes) != 0 {
-		t.Fatalf("Reported changes for identical dirs: %v", changes)
-	}
-	os.RemoveAll(src)
-	os.RemoveAll(dst)
-}
-
-func mutateSampleDir(t *testing.T, root string) {
-	// Remove a regular file
-	if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Remove a directory
-	if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Remove a symlink
-	if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Rewrite a file
-	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	// Replace a file
-	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
-		t.Fatal(err)
-	}
-	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
-		t.Fatal(err)
-	}
-
-	// Touch file
-	if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
-		t.Fatal(err)
-	}
-
-	// Replace file with dir
-	if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create new file
-	if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create new dir
-	if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a new symlink
-	if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Change a symlink
-	if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Replace dir with file
-	if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
-		t.Fatal(err)
-	}
-	if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	// Touch dir
-	if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestChangesDirsMutated(t *testing.T) {
-	src, err := ioutil.TempDir("", "docker-changes-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	createSampleDir(t, src)
-	dst := src + "-copy"
-	if err := copyDir(src, dst); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(src)
-	defer os.RemoveAll(dst)
-
-	mutateSampleDir(t, dst)
-
-	changes, err := ChangesDirs(dst, src)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	sort.Sort(changesByPath(changes))
-
-	expectedChanges := []Change{
-		{"/dir1", ChangeDelete},
-		{"/dir2", ChangeModify},
-		{"/dirnew", ChangeAdd},
-		{"/file1", ChangeDelete},
-		{"/file2", ChangeModify},
-		{"/file3", ChangeModify},
-		{"/file4", ChangeModify},
-		{"/file5", ChangeModify},
-		{"/filenew", ChangeAdd},
-		{"/symlink1", ChangeDelete},
-		{"/symlink2", ChangeModify},
-		{"/symlinknew", 
ChangeAdd},
-	}
-
-	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
-		if i >= len(expectedChanges) {
-			t.Fatalf("unexpected change %s\n", changes[i].String())
-		}
-		if i >= len(changes) {
-			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
-		}
-		if changes[i].Path == expectedChanges[i].Path {
-			if changes[i] != expectedChanges[i] {
-				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
-			}
-		} else if changes[i].Path < expectedChanges[i].Path {
-			t.Fatalf("unexpected change %s\n", changes[i].String())
-		} else {
-			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
-		}
-	}
-}
-
-func TestApplyLayer(t *testing.T) {
-	src, err := ioutil.TempDir("", "docker-changes-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	createSampleDir(t, src)
-	defer os.RemoveAll(src)
-	dst := src + "-copy"
-	if err := copyDir(src, dst); err != nil {
-		t.Fatal(err)
-	}
-	mutateSampleDir(t, dst)
-	defer os.RemoveAll(dst)
-
-	changes, err := ChangesDirs(dst, src)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	layer, err := ExportChanges(dst, changes, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	layerCopy, err := NewTempArchive(layer, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := ApplyLayer(src, layerCopy); err != nil {
-		t.Fatal(err)
-	}
-
-	changes2, err := ChangesDirs(src, dst)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(changes2) != 0 {
-		t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
-	}
-}
-
-func TestChangesSizeWithHardlinks(t *testing.T) {
-	srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(srcDir)
-
-	destDir, err := ioutil.TempDir("", "docker-test-destDir")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(destDir)
-
-	creationSize, err := prepareUntarSourceDirectory(100, destDir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	changes, err := ChangesDirs(destDir, srcDir)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	got := ChangesSize(destDir, changes)
-	if got != int64(creationSize) {
-		t.Errorf("Expected %d bytes of changes, got %d", creationSize, got)
-	}
-}
-
-func TestChangesSizeWithNoChanges(t *testing.T) {
-	size := ChangesSize("/tmp", nil)
-	if size != 0 {
-		t.Fatalf("ChangesSize with no changes should be 0, was %d", size)
-	}
-}
-
-func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
-	changes := []Change{
-		{Path: "deletedPath", Kind: ChangeDelete},
-	}
-	size := ChangesSize("/tmp", changes)
-	if size != 0 {
-		t.Fatalf("ChangesSize with only delete changes should be 0, was %d", size)
-	}
-}
-
-func TestChangesSize(t *testing.T) {
-	parentPath, err := ioutil.TempDir("", "docker-changes-test")
-	defer os.RemoveAll(parentPath)
-	addition := path.Join(parentPath, "addition")
-	if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
-		t.Fatal(err)
-	}
-	modification := path.Join(parentPath, "modification")
-	if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
-		t.Fatal(err)
-	}
-	changes := []Change{
-		{Path: "addition", Kind: ChangeAdd},
-		{Path: "modification", Kind: ChangeModify},
-	}
-	size := ChangesSize(parentPath, changes)
-	if size != 6 {
-		t.Fatalf("Expected 6 bytes of changes, got %d", size)
-	}
-}
-
-func checkChanges(expectedChanges, changes []Change, t *testing.T) {
-	sort.Sort(changesByPath(expectedChanges))
-	sort.Sort(changesByPath(changes))
-	for i := 0; i 
< max(len(changes), len(expectedChanges)); i++ {
-		if i >= len(expectedChanges) {
-			t.Fatalf("unexpected change %s\n", changes[i].String())
-		}
-		if i >= len(changes) {
-			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
-		}
-		if changes[i].Path == expectedChanges[i].Path {
-			if changes[i] != expectedChanges[i] {
-				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
-			}
-		} else if changes[i].Path < expectedChanges[i].Path {
-			t.Fatalf("unexpected change %s\n", changes[i].String())
-		} else {
-			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
-		}
-	}
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
deleted file mode 100644
index 6646b4df..00000000
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// +build !windows
-
-package archive
-
-import (
-	"os"
-	"syscall"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
-	// Don't look at size for dirs, it's not a good measure of change
-	if oldStat.Mode() != newStat.Mode() ||
-		oldStat.UID() != newStat.UID() ||
-		oldStat.GID() != newStat.GID() ||
-		oldStat.Rdev() != newStat.Rdev() ||
-		// Don't look at size for dirs, it's not a good measure of change
-		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
-			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
-		return true
-	}
-	return false
-}
-
-func (info *FileInfo) isDir() bool {
-	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
-}
-
-func getIno(fi os.FileInfo) uint64 {
-	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
-	return fi.Sys().(*syscall.Stat_t).Nlink > 1
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
deleted file mode 100644
index 2d8708d0..00000000
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package archive
-
-import (
-	"os"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
-
-	// Don't look at size for dirs, it's not a good measure of change
-	if oldStat.ModTime() != newStat.ModTime() ||
-		oldStat.Mode() != newStat.Mode() ||
-		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
-		return true
-	}
-	return false
-}
-
-func (info *FileInfo) isDir() bool {
-	return info.parent == nil || info.stat.IsDir()
-}
-
-func getIno(fi os.FileInfo) (inode uint64) {
-	return
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
-	return false
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
deleted file mode 100644
index 251c9bd9..00000000
--- 
a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,407 +0,0 @@ -package archive - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. 
TarResource accepts either a
-// directory or a file path and correctly sets the Tar options.
-func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
-	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
-}
-
-// TarResourceRebase is like TarResource but renames the first path element of
-// items in the resulting tar archive to match the given rebaseName if not "".
-func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
-	sourcePath = normalizePath(sourcePath)
-	if _, err = os.Lstat(sourcePath); err != nil {
-		// Catches the case where the source does not exist or is not a
-		// directory if asserted to be a directory, as this also causes an
-		// error.
-		return
-	}
-
-	// Separate the source path between its directory and
-	// the entry in that directory which we are archiving.
-	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
-
-	filter := []string{sourceBase}
-
-	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
-
-	return TarWithOptions(sourceDir, &TarOptions{
-		Compression:      Uncompressed,
-		IncludeFiles:     filter,
-		IncludeSourceDir: true,
-		RebaseNames: map[string]string{
-			sourceBase: rebaseName,
-		},
-	})
-}
-
-// CopyInfo holds basic info about the source
-// or destination path of a copy operation.
-type CopyInfo struct {
-	Path       string
-	Exists     bool
-	IsDir      bool
-	RebaseName string
-}
-
-// CopyInfoSourcePath stats the given path to create a CopyInfo
-// struct representing that resource for the source of an archive copy
-// operation. The given path should be an absolute local path. A source path
-// has all symlinks evaluated that appear before the last path separator ("/"
-// on Unix). As it is to be a copy source, the path must exist.
-func CopyInfoSourcePath(path string) (CopyInfo, error) {
-	// Split the given path into its Directory and Base components. We will
-	// evaluate symlinks in the directory component then append the base.
-	path = normalizePath(path)
-	dirPath, basePath := filepath.Split(path)
-
-	resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
-	if err != nil {
-		return CopyInfo{}, err
-	}
-
-	// resolvedDirPath will have been cleaned (no trailing path separators) so
-	// we can manually join it with the base path element.
-	resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath
-
-	var rebaseName string
-	if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
-		// In the case where the path had a trailing separator and a symlink
-		// evaluation has changed the last path component, we will need to
-		// rebase the name in the archive that is being copied to match the
-		// originally requested name.
-		rebaseName = filepath.Base(path)
-	}
-
-	stat, err := os.Lstat(resolvedPath)
-	if err != nil {
-		return CopyInfo{}, err
-	}
-
-	return CopyInfo{
-		Path:       resolvedPath,
-		Exists:     true,
-		IsDir:      stat.IsDir(),
-		RebaseName: rebaseName,
-	}, nil
-}
-
-// CopyInfoDestinationPath stats the given path to create a CopyInfo
-// struct representing that resource for the destination of an archive copy
-// operation. The given path should be an absolute local path.
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
-	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
-	path = normalizePath(path)
-	originalPath := path
-
-	stat, err := os.Lstat(path)
-
-	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
-		// The path exists and is not a symlink. 
- return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Lstat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. 
The archive should be extracted to the parent of
-		// the destination path instead, and when it is, the directory that is
-		// created as a result should take the name of the destination path.
-		// The source content entries will have to be renamed to have a
-		// basename which matches the destination path's basename.
-		return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
-	case assertsDirectory(dstInfo.Path):
-		// The destination does not exist and is asserted to be created as a
-		// directory, but the source content is not a directory. This is an
-		// error condition since you cannot create a directory from a file
-		// source.
-		return "", nil, ErrDirNotExists
-	default:
-		// The last remaining case is when the destination does not exist, is
-		// not asserted to be a directory, and the source content is not an
-		// archive of a directory. In this case, the destination file will need
-		// to be created when the archive is extracted and the source content
-		// entry will have to be renamed to have a basename which matches the
-		// destination path's basename.
-		return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
-	}
-
-}
-
-// rebaseArchiveEntries rewrites the given srcContent archive replacing
-// an occurrence of oldBase with newBase at the beginning of entry names.
-func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
-	if oldBase == string(os.PathSeparator) {
-		// If oldBase specifies the root directory, use an empty string as
-		// oldBase instead so that newBase doesn't replace the path separator
-		// that all paths will start with.
-		oldBase = ""
-	}
-
-	rebased, w := io.Pipe()
-
-	go func() {
-		srcTar := tar.NewReader(srcContent)
-		rebasedTar := tar.NewWriter(w)
-
-		for {
-			hdr, err := srcTar.Next()
-			if err == io.EOF {
-				// Signals end of archive.
-				rebasedTar.Close()
-				w.Close()
-				return
-			}
-			if err != nil {
-				w.CloseWithError(err)
-				return
-			}
-
-			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
-
-			if err = rebasedTar.WriteHeader(hdr); err != nil {
-				w.CloseWithError(err)
-				return
-			}
-
-			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
-				w.CloseWithError(err)
-				return
-			}
-		}
-	}()
-
-	return rebased
-}
-
-// CopyResource performs an archive copy from the given source path to the
-// given destination path. The source path MUST exist and the destination
-// path's parent directory must exist.
-func CopyResource(srcPath, dstPath string) error {
-	var (
-		srcInfo CopyInfo
-		err     error
-	)
-
-	// Ensure in platform semantics
-	srcPath = normalizePath(srcPath)
-	dstPath = normalizePath(dstPath)
-
-	// Clean the source and destination paths.
-	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
-	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
-
-	if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil {
-		return err
-	}
-
-	content, err := TarResource(srcInfo)
-	if err != nil {
-		return err
-	}
-	defer content.Close()
-
-	return CopyTo(content, srcInfo, dstPath)
-}
-
-// CopyTo handles extracting the given content whose
-// entries should be sourced from srcInfo to dstPath.
-func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
-	// The destination path need not exist, but CopyInfoDestinationPath will
-	// ensure that at least the parent directory exists. 
- dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go deleted file mode 100644 index 25f1811a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go +++ /dev/null @@ -1,625 +0,0 @@ -package archive - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" -) - -func removeAllPaths(paths ...string) { - for _, path := range paths { - os.RemoveAll(path) - } -} - -func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { - var err error - - if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - return -} - -func isNotDir(err error) bool { - return strings.Contains(err.Error(), "not a directory") -} - -func joinTrailingSep(pathElements ...string) string { - joined := filepath.Join(pathElements...) - - return fmt.Sprintf("%s%c", joined, filepath.Separator) -} - -func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { - t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) - - fileA, err := os.Open(filenameA) - if err != nil { - return - } - defer fileA.Close() - - fileB, err := os.Open(filenameB) - if err != nil { - return - } - defer fileB.Close() - - hasher := sha256.New() - - if _, err = io.Copy(hasher, fileA); err != nil { - return - } - - hashA := hasher.Sum(nil) - hasher.Reset() - - if _, err = io.Copy(hasher, fileB); err != nil { - return - } - - hashB := hasher.Sum(nil) - - if !bytes.Equal(hashA, hashB) { - err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) - } - - return -} - -func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { - t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) - - var changes []Change - - if changes, err = ChangesDirs(newDir, oldDir); err != nil { - return - } - - if len(changes) != 0 { - err = fmt.Errorf("expected no changes between directories, but got: %v", changes) - } - - return -} - -func logDirContents(t *testing.T, dirPath string) { - logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { - if err != nil { - t.Errorf("stat error for path %q: %s", path, err) - return nil - } - - if info.IsDir() { - path = joinTrailingSep(path) - } - - t.Logf("\t%s", path) - - return nil - }) - - t.Logf("logging directory contents: %q", dirPath) - - if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { - t.Fatal(err) - } -} - -func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { - t.Logf("copying from %q to %q", srcPath, dstPath) - - return CopyResource(srcPath, dstPath) -} - -// Basic assumptions about SRC and DST: -// 1. SRC must exist. -// 2. If SRC ends with a trailing separator, it must be a directory. -// 3. DST parent directory must exist. 
-// 4. If DST exists as a file, it must not end with a trailing separator. - -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func TestCopyErrSrcNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1")); !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func TestCopyErrSrcNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1")); !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Test for error when SRC is a valid file or directory, -// but the DST parent directory does not exist. -func TestCopyErrDstParentNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - // Try with a file source. - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a file whose parent does not exist. - if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a directory whose parent does not exist. - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func TestCopyErrDstNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - // Try with a file source. - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } - - // Try with a directory source. 
- srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Possibilities are reduced to the remaining 10 cases: -// -// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action -// =================================================================================================== -// A | no | - | no | - | no | create file -// B | no | - | no | - | yes | error -// C | no | - | yes | no | - | overwrite file -// D | no | - | yes | yes | - | create file in dst dir -// E | yes | no | no | - | - | create dir, copy contents -// F | yes | no | yes | no | - | error -// G | yes | no | yes | yes | - | copy dir and contents -// H | yes | yes | no | - | - | create dir, copy contents -// I | yes | yes | yes | no | - | error -// J | yes | yes | yes | yes | - | copy dir contents -// - -// A. SRC specifies a file and DST (no trailing path separator) doesn't -// exist. This should create a file with the name DST and copy the -// contents of the source file into it. -func TestCopyCaseA(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "itWorks.txt") - - var err error - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// B. SRC specifies a file and DST (with trailing path separator) doesn't -// exist. This should cause an error because the copy operation cannot -// create a directory when copying a single file. -func TestCopyCaseB(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := joinTrailingSep(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcPath, dstDir); err == nil { - t.Fatal("expected ErrDirNotExists error, but got nil instead") - } - - if err != ErrDirNotExists { - t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) - } -} - -// C. SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func TestCopyCaseC(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "file2") - - var err error - - // Ensure they start out different. - if err = fileContentsEqual(t, srcPath, dstPath); err == nil { - t.Fatal("expected different file contents") - } - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// D. 
SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseD(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := filepath.Join(tmpDirB, "dir1") - dstPath := filepath.Join(dstDir, "file1") - - var err error - - // Ensure that dstPath doesn't exist. - if _, err = os.Stat(dstPath); !os.IsNotExist(err) { - t.Fatalf("did not expect dstPath %q to exist", dstPath) - } - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir1") - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// E. SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func TestCopyCaseE(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// F. SRC specifies a directory and DST exists as a file. This should cause an -// error as it is not possible to overwrite a file with a directory. -func TestCopyCaseF(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// G. SRC specifies a directory and DST exists as a directory. 
This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func TestCopyCaseG(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir2") - resultDir := filepath.Join(dstDir, "dir1") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir2") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// H. SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseH(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } -} - -// I. SRC specifies a directory's contents only and DST exists as a file. This -// should cause an error as it is not possible to overwrite a file with a -// directory. -func TestCopyCaseI(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// J. SRC specifies a directory's contents only and DST exists as a directory. 
-// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func TestCopyCaseJ(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "dir5") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir5") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index e305b5e4..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index 5ec71110..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,264 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. 
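-// An illustrative sketch of a typical call (not from the original source;
-// DecompressStream is this package's own helper, used by applyLayerHandler below):
-//
-//	rc, err := DecompressStream(compressedLayer) // compressedLayer is an io.Reader
-//	if err != nil {
-//		return err
-//	}
-//	defer rc.Close()
-//	size, err := UnpackLayer("/mnt/rootfs", rc, nil) // nil options fall back to defaults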
-func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - if options == nil { - options = &TarOptions{} - } - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. 
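-		// (For example, a tar entry named "../victim" survives the earlier
-		// filepath.Clean, so its rel against dest starts with "../" and is
-		// rejected by the breakout check below.)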
-		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
-			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
-		}
-		base := filepath.Base(path)
-
-		if strings.HasPrefix(base, WhiteoutPrefix) {
-			dir := filepath.Dir(path)
-			if base == WhiteoutOpaqueDir {
-				fi, err := os.Lstat(dir)
-				if err != nil && !os.IsNotExist(err) {
-					return 0, err
-				}
-				if err := os.RemoveAll(dir); err != nil {
-					return 0, err
-				}
-				if err := os.Mkdir(dir, fi.Mode()&os.ModePerm); err != nil {
-					return 0, err
-				}
-			} else {
-				originalBase := base[len(WhiteoutPrefix):]
-				originalPath := filepath.Join(dir, originalBase)
-				if err := os.RemoveAll(originalPath); err != nil {
-					return 0, err
-				}
-			}
-		} else {
-			// If path exists we almost always just want to remove and replace it.
-			// The only exception is when it is a directory *and* the file from
-			// the layer is also a directory. Then we want to merge them (i.e.
-			// just apply the metadata from the layer).
-			if fi, err := os.Lstat(path); err == nil {
-				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
-					if err := os.RemoveAll(path); err != nil {
-						return 0, err
-					}
-				}
-			}
-
-			trBuf.Reset(tr)
-			srcData := io.Reader(trBuf)
-			srcHdr := hdr
-
-			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
-			// we manually retarget these into the temporary files we extracted them into
-			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
-				linkBasename := filepath.Base(hdr.Linkname)
-				srcHdr = aufsHardlinks[linkBasename]
-				if srcHdr == nil {
-					return 0, fmt.Errorf("Invalid aufs hardlink")
-				}
-				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
-				if err != nil {
-					return 0, err
-				}
-				defer tmpFile.Close()
-				srcData = tmpFile
-			}
-
-			// if the options contain uid & gid maps, convert header uid/gid
-			// entries using the maps such that lchown sets the proper mapped
-			// uid/gid after writing the file. We only perform this mapping if
-			// the file isn't already owned by the remapped root UID or GID, as
-			// that specific uid/gid has no mapping from container -> host, and
-			// those files already have the proper ownership for inside the
-			// container.
-			if srcHdr.Uid != remappedRootUID {
-				xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
-				if err != nil {
-					return 0, err
-				}
-				srcHdr.Uid = xUID
-			}
-			if srcHdr.Gid != remappedRootGID {
-				xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
-				if err != nil {
-					return 0, err
-				}
-				srcHdr.Gid = xGID
-			}
-			if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
-				return 0, err
-			}
-
-			// Directory mtimes must be handled at the end to avoid further
-			// file creation in them modifying the directory mtime
-			if hdr.Typeflag == tar.TypeDir {
-				dirs = append(dirs, hdr)
-			}
-		}
-	}
-
-	for _, hdr := range dirs {
-		path := filepath.Join(dest, hdr.Name)
-		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
-			return 0, err
-		}
-	}
-
-	return size, nil
-}
-
-// ApplyLayer parses a diff in the standard layer format from `layer`,
-// and applies it to the directory `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func ApplyLayer(dest string, layer Reader) (int64, error) {
-	return applyLayerHandler(dest, layer, &TarOptions{}, true)
-}
-
-// ApplyUncompressedLayer parses a diff in the standard layer format from
-// `layer`, and applies it to the directory `dest`.
The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go deleted file mode 100644 index 01ed4372..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package archive - -import ( - "archive/tar" - "testing" -) - -func TestApplyLayerInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestApplyLayerInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go deleted file mode 100644 index a5e08e4e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 3448569b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index e85aac05..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go deleted file mode 100644 index 98719032..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" 
- "path/filepath" - "time" -) - -var testUntarFns = map[string]func(string, io.Reader) error{ - "untar": func(dest string, r io.Reader) error { - return Untar(r, dest, nil) - }, - "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, Reader(r)) - return err - }, -} - -// testBreakout is a helper function that, within the provided `tmpdir` directory, -// creates a `victim` folder with a generated `hello` file in it. -// `untar` extracts to a directory named `dest`, the tar file created from `headers`. -// -// Here are the tested scenarios: -// - removed `victim` folder (write) -// - removed files from `victim` folder (write) -// - new files in `victim` folder (write) -// - modified files in `victim` folder (write) -// - file in `dest` with same content as `victim/hello` (read) -// -// When using testBreakout make sure you cover one of the scenarios listed above. -func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { - tmpdir, err := ioutil.TempDir("", tmpdir) - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - dest := filepath.Join(tmpdir, "dest") - if err := os.Mkdir(dest, 0755); err != nil { - return err - } - - victim := filepath.Join(tmpdir, "victim") - if err := os.Mkdir(victim, 0755); err != nil { - return err - } - hello := filepath.Join(victim, "hello") - helloData, err := time.Now().MarshalText() - if err != nil { - return err - } - if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { - return err - } - helloStat, err := os.Stat(hello) - if err != nil { - return err - } - - reader, writer := io.Pipe() - go func() { - t := tar.NewWriter(writer) - for _, hdr := range headers { - t.WriteHeader(hdr) - } - t.Close() - }() - - untar := testUntarFns[untarFn] - if untar == nil { - return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) - } - if err := untar(dest, reader); err != nil { - if _, ok := err.(breakoutError); !ok { - // If untar returns an error unrelated to an archive breakout, - // then consider this an unexpected error and abort. - return err - } - // Here, untar detected the breakout. - // Let's move on verifying that indeed there was no breakout. - fmt.Printf("breakoutError: %v\n", err) - } - - // Check victim folder - f, err := os.Open(victim) - if err != nil { - // codepath taken if victim folder was removed - return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) - } - defer f.Close() - - // Check contents of victim folder - // - // We are only interested in getting 2 files from the victim folder, because if all is well - // we expect only one result, the `hello` file. If there is a second result, it cannot - // hold the same name `hello` and we assume that a new file got created in the victim folder. - // That is enough to detect an archive breakout. 
-	names, err := f.Readdirnames(2)
-	if err != nil {
-		// codepath taken if victim is not a folder
-		return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
-	}
-	for _, name := range names {
-		if name != "hello" {
-			// codepath taken if new file was created in victim folder
-			return fmt.Errorf("archive breakout: new file %q", name)
-		}
-	}
-
-	// Check victim/hello
-	f, err = os.Open(hello)
-	if err != nil {
-		// codepath taken if read permissions were removed
-		return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
-	}
-	defer f.Close()
-	b, err := ioutil.ReadAll(f)
-	if err != nil {
-		return err
-	}
-	fi, err := f.Stat()
-	if err != nil {
-		return err
-	}
-	if helloStat.IsDir() != fi.IsDir() ||
-		// TODO: cannot check for fi.ModTime() change
-		helloStat.Mode() != fi.Mode() ||
-		helloStat.Size() != fi.Size() ||
-		!bytes.Equal(helloData, b) {
-		// codepath taken if hello has been modified
-		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
-	}
-
-	// Check that nothing in dest/ has the same content as victim/hello.
-	// Since victim/hello was generated with time.Now(), it is safe to assume
-	// that any file whose content exactly matches victim/hello managed,
-	// somehow, to access victim/hello.
-	return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
-		if info.IsDir() {
-			if err != nil {
-				// skip directory if error
-				return filepath.SkipDir
-			}
-			// enter directory
-			return nil
-		}
-		if err != nil {
-			// skip file if error
-			return nil
-		}
-		b, err := ioutil.ReadFile(path)
-		if err != nil {
-			// Houston, we have a problem. Aborting (space)walk.
-			return err
-		}
-		if bytes.Equal(helloData, b) {
-			return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
-		}
-		return nil
-	})
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go
deleted file mode 100644
index 3d9c3132..00000000
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package archive
-
-// Whiteouts are files with a special meaning for the layered filesystem.
-// Docker uses AUFS whiteout files inside exported archives. In other
-// filesystems these files are generated/handled on tar creation/extraction.
-
-// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
-// filename this means that file has been removed from the base layer.
-const WhiteoutPrefix = ".wh."
-
-// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
-// for removing an actual file. Normally these files are excluded from exported
-// archives.
-const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
-
-// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
-// layers. Normally these should not go into exported archives and all changed
-// hardlinks should be copied to the top layer.
-const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
-
-// WhiteoutOpaqueDir file means directory has been made opaque - meaning
-// readdir calls to this directory do not follow to lower layers.
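-// For a directory /a, the opaque marker is stored as the file /a/.wh..wh..opq,
-// whereas an ordinary whiteout of /a/b would be stored as /a/.wh.b.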
-const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index dfb335c0..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io/ioutil" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return ioutil.NopCloser(buf), nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go deleted file mode 100644 index 46ab3669..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io" - "testing" -) - -func TestGenerateEmptyFile(t *testing.T) { - archive, err := Generate("emptyFile") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"emptyFile", ""}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} - -func TestGenerateWithContent(t *testing.T) { - archive, err := Generate("file", 
"content") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"file", "content"}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go deleted file mode 100644 index 5559732a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,184 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -// exclusion return true if the specified pattern is an exclusion -func exclusion(pattern string) bool { - return pattern[0] == '!' -} - -// empty return true if the specified pattern is empty -func empty(pattern string) bool { - return pattern == "" -} - -// CleanPatterns takes a slice of patterns returns a new -// slice of patterns cleaned with filepath.Clean, stripped -// of any empty patterns and lets the caller know whether the -// slice contains any exception patterns (prefixed with !). -func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { - // Loop over exclusion patterns and: - // 1. Clean them up. - // 2. Indicate whether we are dealing with any exception rules. - // 3. Error if we see a single exclusion marker on it's own (!). - cleanedPatterns := []string{} - patternDirs := [][]string{} - exceptions := false - for _, pattern := range patterns { - // Eliminate leading and trailing whitespace. - pattern = strings.TrimSpace(pattern) - if empty(pattern) { - continue - } - if exclusion(pattern) { - if len(pattern) == 1 { - return nil, nil, false, errors.New("Illegal exclusion pattern: !") - } - exceptions = true - } - pattern = filepath.Clean(pattern) - cleanedPatterns = append(cleanedPatterns, pattern) - if exclusion(pattern) { - pattern = pattern[1:] - } - patternDirs = append(patternDirs, strings.Split(pattern, "/")) - } - - return cleanedPatterns, patternDirs, exceptions, nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. 
-		return false, nil
-	}
-
-	patterns, patDirs, _, err := CleanPatterns(patterns)
-	if err != nil {
-		return false, err
-	}
-
-	return OptimizedMatches(file, patterns, patDirs)
-}
-
-// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
-// It will assume that the inputs have been preprocessed and therefore the function
-// doesn't need to do as much error checking and clean-up. This was done to avoid
-// repeating these steps on each file being checked during the archive process.
-// The more generic fileutils.Matches() can't make these assumptions.
-func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
-	matched := false
-	parentPath := filepath.Dir(file)
-	parentPathDirs := strings.Split(parentPath, "/")
-
-	for i, pattern := range patterns {
-		negative := false
-
-		if exclusion(pattern) {
-			negative = true
-			pattern = pattern[1:]
-		}
-
-		match, err := filepath.Match(pattern, file)
-		if err != nil {
-			return false, err
-		}
-
-		if !match && parentPath != "." {
-			// Check to see if the pattern matches one of our parent dirs.
-			if len(patDirs[i]) <= len(parentPathDirs) {
-				match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
-					strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
-			}
-		}
-
-		if match {
-			matched = !negative
-		}
-	}
-
-	if matched {
-		logrus.Debugf("Skipping excluded path: %s", file)
-	}
-
-	return matched, nil
-}
-
-// CopyFile copies from src to dst until either EOF is reached
-// on src or an error occurs. It verifies that src exists and removes
-// dst if it exists.
-func CopyFile(src, dst string) (int64, error) {
-	cleanSrc := filepath.Clean(src)
-	cleanDst := filepath.Clean(dst)
-	if cleanSrc == cleanDst {
-		return 0, nil
-	}
-	sf, err := os.Open(cleanSrc)
-	if err != nil {
-		return 0, err
-	}
-	defer sf.Close()
-	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
-		return 0, err
-	}
-	df, err := os.Create(cleanDst)
-	if err != nil {
-		return 0, err
-	}
-	defer df.Close()
-	return io.Copy(df, sf)
-}
-
-// ReadSymlinkedDirectory returns the target directory of a symlink.
-// The target of the symbolic link must not be a file.
-func ReadSymlinkedDirectory(path string) (string, error) {
-	var realPath string
-	var err error
-	if realPath, err = filepath.Abs(path); err != nil {
-		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
-	}
-	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
-		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
-	}
-	realPathInfo, err := os.Stat(realPath)
-	if err != nil {
-		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
-	}
-	if !realPathInfo.Mode().IsDir() {
-		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
-	}
-	return realPath, nil
-}
-
-// CreateIfNotExists creates a file or a directory only if it does not already exist.
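-// An illustrative use (paths are hypothetical):
-//
-//	_ = CreateIfNotExists("/var/lib/foo", true)        // ensure the directory exists
-//	_ = CreateIfNotExists("/var/lib/foo/ready", false) // ensure an empty file exists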
-func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go deleted file mode 100644 index b544ffbf..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package fileutils - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" -) - -// CopyFile with invalid src -func TestCopyFileWithInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with invalid dest -func TestCopyFileWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "file") - err = ioutil.WriteFile(src, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with same src and dest -func TestCopyFileWithSameSrcAndDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - file := path.Join(tempFolder, "file") - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, file) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -// CopyFile with same src and dest but path is different and not clean -func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - testFolder := path.Join(tempFolder, "test") - err = os.MkdirAll(testFolder, 0740) - if err != nil { - t.Fatal(err) - } - file := path.Join(testFolder, "file") - sameFile := testFolder + "/../test/file" - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, sameFile) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -func TestCopyFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "src") - dest := path.Join(tempFolder, "dest") - ioutil.WriteFile(src, []byte("content"), 0777) - ioutil.WriteFile(dest, []byte("destContent"), 0777) - bytes, err := 
CopyFile(src, dest) - if err != nil { - t.Fatal(err) - } - if bytes != 7 { - t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) - } - actual, err := ioutil.ReadFile(dest) - if err != nil { - t.Fatal(err) - } - if string(actual) != "content" { - t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") - } -} - -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -func TestWildcardMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*"}) - if match != true { - t.Errorf("failed to get a wildcard match, got %v", match) - } -} - -// A simple pattern match should return true. -func TestPatternMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go"}) - if match != true { - t.Errorf("failed to get a match, got %v", match) - } -} - -// An exclusion followed by an inclusion should return true. -func TestExclusionPatternMatchesPatternBefore(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) - if match != true { - t.Errorf("failed to get true match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. 
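-// The three tests that follow vary only the form of the folder pattern:
-// bare ("docs"), trailing slash ("docs/"), and wildcard ("docs/*").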
-func TestPatternMatchesFolderExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A pattern followed by an exclusion should return false. -func TestExclusionPatternMatchesPatternAfter(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) - if match != false { - t.Errorf("failed to get false match on exclusion pattern, got %v", match) - } -} - -// A filename evaluating to . should return false. -func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { - match, _ := Matches(".", []string{"*.go"}) - if match != false { - t.Errorf("failed to get false match on ., got %v", match) - } -} - -// A single ! pattern should return an error. -func TestSingleExclamationError(t *testing.T) { - _, err := Matches("fileutils.go", []string{"!"}) - if err == nil { - t.Errorf("failed to get an error for a single exclamation point, got %v", err) - } -} - -// A string preceded with a ! should return true from Exclusion. -func TestExclusion(t *testing.T) { - exclusion := exclusion("!") - if !exclusion { - t.Errorf("failed to get true for a single !, got %v", exclusion) - } -} - -// Matches with no patterns -func TestMatchesWithNoPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{}) - if err != nil { - t.Fatal(err) - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// Matches with malformed patterns -func TestMatchesWithMalformedPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{"["}) - if err == nil { - t.Fatal("Should have failed because of a malformed syntax in the pattern") - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// An empty string should return true from Empty. 
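-// (CleanPatterns relies on this to drop blank patterns, as the
-// StripEmptyPatterns test below exercises.)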
-func TestEmpty(t *testing.T) { - empty := empty("") - if !empty { - t.Errorf("failed to get true for an empty string, got %v", empty) - } -} - -func TestCleanPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsStripEmptyPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsExceptionFlag(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsErrorSingleException(t *testing.T) { - _, _, _, err := CleanPatterns([]string{"!"}) - if err == nil { - t.Errorf("expected error on single exclamation point, got %v", err) - } -} - -func TestCleanPatternsFolderSplit(t *testing.T) { - _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) - if dirs[0][0] != "docs" { - t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) - } - if dirs[0][1] != "config" { - t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) - } -} - -func TestCreateIfNotExistsDir(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - folderToCreate := filepath.Join(tempFolder, "tocreate") - - if err := CreateIfNotExists(folderToCreate, true); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(folderToCreate) - if err != nil { - t.Fatalf("Should have create a folder, got %v", err) - } - - if !fileinfo.IsDir() { - t.Fatalf("Should have been a dir, seems it's not") - } -} - -func TestCreateIfNotExistsFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - fileToCreate := filepath.Join(tempFolder, "file/to/create") - - if err := CreateIfNotExists(fileToCreate, false); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(fileToCreate) - if err != nil { - t.Fatalf("Should have create a file, got %v", err) - } - - if fileinfo.IsDir() { - t.Fatalf("Should have been a file, seems it's not") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index 7e00802c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package fileutils - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it 
via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cac..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go deleted file mode 100644 index dcae1788..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
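-// For example, it yields "%USERPROFILE%" on Windows and "~" elsewhere.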
-func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go deleted file mode 100644 index 7a95cb2b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir - -import ( - "path/filepath" - "testing" -) - -func TestGet(t *testing.T) { - home := Get() - if home == "" { - t.Fatal("returned home directory is empty") - } - - if !filepath.IsAbs(home) { - t.Fatalf("returned path is not absolute: %s", home) - } -} - -func TestGetShortcutString(t *testing.T) { - shortcut := GetShortcutString() - if shortcut == "" { - t.Fatal("returned shortcut string is empty") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index a1301ee9..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,195 +0,0 @@ -package idtools - -import ( - "bufio" - "fmt" - "os" - "sort" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
-// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID - } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID - } - return uid, gid, nil -} - -// ToContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// ToHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// CreateIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { - subuidRanges, err := parseSubuid(username) - if err != nil { - return nil, nil, err - } - subgidRanges, err := parseSubgid(groupname) - if err != nil { - return nil, nil, err - } - if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) - } - if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username { - // return the first entry for a user; ignores potential for multiple ranges per 
user - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index 0444307d..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package idtools - -import ( - "os" - "path/filepath" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - if err := os.Chown(path, ownerUID, ownerGID); err != nil { - return err - } - // short-circuit--we were called with an existing directory and chown was requested - return nil - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix_test.go deleted file mode 100644 index 55b338c9..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix_test.go +++ /dev/null @@ -1,243 +0,0 @@ -// +build !windows - -package idtools - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -type node struct { - uid int - gid int -} - -func TestMkdirAllAs(t *testing.T) { - dirName, err := ioutil.TempDir("", "mkdirall") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - 
"usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should be chowned, but nothing else - if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - testTree["usr"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func TestMkdirAllNewAs(t *testing.T) { - - dirName, err := ioutil.TempDir("", "mkdirnew") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - "usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should NOT be chowned - if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func TestMkdirAs(t *testing.T) { - - dirName, err := ioutil.TempDir("", "mkdir") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - } - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should just chown to the requested uid/gid - if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr"] = node{99, 99} - verifyTree, 
err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // create a subdir under a dir which doesn't exist--should fail - if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { - t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") - } - - // create a subdir under an existing dir; should only change the ownership of the new subdir - if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - testTree["usr/bin"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func buildTree(base string, tree map[string]node) error { - for path, node := range tree { - fullPath := filepath.Join(base, path) - if err := os.MkdirAll(fullPath, 0755); err != nil { - return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) - } - if err := os.Chown(fullPath, node.uid, node.gid); err != nil { - return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) - } - } - return nil -} - -func readTree(base, root string) (map[string]node, error) { - tree := make(map[string]node) - - dirInfos, err := ioutil.ReadDir(base) - if err != nil { - return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) - } - - for _, info := range dirInfos { - s := &syscall.Stat_t{} - if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { - return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) - } - tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} - if info.IsDir() { - // read the subdirectory - subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) - if err != nil { - return nil, err - } - for path, nodeinfo := range subtree { - tree[path] = nodeinfo - } - } - } - return tree, nil -} - -func compareTrees(left, right map[string]node) error { - if len(left) != len(right) { - return fmt.Errorf("Trees aren't the same size") - } - for path, nodeLeft := range left { - if nodeRight, ok := right[path]; ok { - if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { - // mismatch - return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, - nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) - } - continue - } - return fmt.Errorf("right tree didn't contain path %q", path) - } - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index d5ec992d..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
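The remapping arithmetic these mkdir helpers build on lives in ToHost and ToContainer from idtools.go above. A worked sketch with a single range; the 100000/65536 values mirror a typical /etc/subuid allocation and are illustrative only.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
)

func main() {
	// One remapping range: container IDs 0..65535 map onto host IDs
	// 100000..165535.
	uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// Container root (uid 0) lands on host uid 100000.
	host, _ := idtools.ToHost(0, uidMap)
	fmt.Println(host) // 100000

	// The reverse translation recovers the container uid.
	cont, _ := idtools.ToContainer(100500, uidMap)
	fmt.Println(cont) // 500
}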
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index c1eedff1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,155 +0,0 @@ -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" - "syscall" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --uid --shell /bin/login --no-create-home --disabled-login --ingroup -// useradd -M -u -s /bin/nologin -N -g -// addgroup --gid -// groupadd -g - -const baseUID int = 10000 -const baseGID int = 10000 -const idMAX int = 65534 - -var ( - userCommand string - groupCommand string - - cmdTemplates = map[string]string{ - "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", - "useradd": "-M -u %d -s /bin/false -N -g %s %s", - "addgroup": "--gid %d %s", - "groupadd": "-g %d %s", - } -) - -func init() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - if _, err := resolveBinary("addgroup"); err == nil { - groupCommand = "addgroup" - } else if _, err := resolveBinary("groupadd"); err == nil { - groupCommand = "groupadd" - } -} - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -// This new user's /etc/sub{uid,gid} ranges will be used for user namespace -// mapping ranges in containers. 
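Concretely, the /etc/sub{uid,gid} entries this function depends on tie back to the parseSubidFile and createIDMap code above. With a hypothetical user (name and numbers are illustrative only) the chain looks like:

# /etc/subuid, one "<name>:<start>:<length>" entry per range
dockremap:100000:65536

parseSubuid("dockremap") yields subIDRange{Start: 100000, Length: 65536}, and createIDMap assigns container IDs from 0 upward, producing IDMap{ContainerID: 0, HostID: 100000, Size: 65536}, the same shape used in the ToHost/ToContainer sketch earlier.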
-func AddNamespaceRangesUser(name string) (int, int, error) { - // Find unused uid, gid pair - uid, err := findUnusedUID(baseUID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) - } - gid, err := findUnusedGID(baseGID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) - } - - // First add the group that we will use - if err := addGroup(name, gid); err != nil { - return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) - } - // Add the user as a member of the group - if err := addUser(name, uid, name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - return uid, gid, nil -} - -func addUser(userName string, uid int, groupName string) error { - - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) - return execAddCmd(userCommand, args) -} - -func addGroup(groupName string, gid int) error { - - if groupCommand == "" { - return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") - } - args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) - // only error out if the error isn't that the group already exists - // if the group exists then our needs are already met - if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { - return err - } - return nil -} - -func execAddCmd(cmd, args string) error { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) - out, err := execCmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) - } - return nil -} - -func findUnusedUID(startUID int) (int, error) { - return findUnused("passwd", startUID) -} - -func findUnusedGID(startGID int) (int, error) { - return findUnused("group", startGID) -} - -func findUnused(file string, id int) (int, error) { - for { - cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) - cmd := exec.Command("sh", "-c", cmdStr) - if err := cmd.Run(); err != nil { - // if a non-zero return code occurs, then we know the ID was not found - // and is usable - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - if status.ExitStatus() == 1 { - //no match, we can use this ID - return id, nil - } - } - } - return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) - } - id++ - if id > idMAX { - return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index d98b354c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. 
-func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index 932e1d1b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,89 +0,0 @@ -package ioutils - -const maxCap = 1e6 - -// BytesPipe is io.ReadWriter which works similarly to pipe(queue). -// All written data could be read only once. Also BytesPipe is allocating -// and releasing new byte slices to adjust to current needs, so there won't be -// overgrown buffer after high load peak. -// BytesPipe isn't goroutine-safe, caller must synchronize it if needed. -type BytesPipe struct { - buf [][]byte // slice of byte-slices of buffered data - lastRead int // index in the first slice to a read point - bufLen int // length of data buffered over the slices -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe(buf []byte) *BytesPipe { - if cap(buf) == 0 { - buf = make([]byte, 0, 64) - } - return &BytesPipe{ - buf: [][]byte{buf[:0]}, - } -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (n int, err error) { - for { - // write data to the last buffer - b := bp.buf[len(bp.buf)-1] - // copy data to the current empty allocated area - n := copy(b[len(b):cap(b)], p) - // increment buffered data length - bp.bufLen += n - // include written data in last buffer - bp.buf[len(bp.buf)-1] = b[:len(b)+n] - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - // allocate slice that has twice the size of the last unless maximum reached - nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) - if maxCap < nextCap { - nextCap = maxCap - } - // add new byte slice to the buffers slice and continue writing - bp.buf = append(bp.buf, make([]byte, 0, nextCap)) - } - return -} - -func (bp *BytesPipe) len() int { - return bp.bufLen - bp.lastRead -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { - for { - read := copy(p, bp.buf[0][bp.lastRead:]) - n += read - bp.lastRead += read - if bp.len() == 0 { - // we have read everything. reset to the beginning. - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = bp.buf[0][:0] - break - } - // break if everything was read - if len(p) == read { - break - } - // more buffered data and more asked. read from next slice. 
- p = p[read:] - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = nil // throw away old slice - bp.buf = bp.buf[1:] // switch to next - } - return -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe_test.go deleted file mode 100644 index 62b1a186..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package ioutils - -import ( - "crypto/sha1" - "encoding/hex" - "testing" -) - -func TestBytesPipeRead(t *testing.T) { - buf := NewBytesPipe(nil) - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - rd := make([]byte, 4) - n, err := buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "1234" { - t.Fatalf("Read %s, but must be %s", rd, "1234") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "5678" { - t.Fatalf("Read %s, but must be %s", rd, "5679") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 2 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) - } - if string(rd[:n]) != "90" { - t.Fatalf("Read %s, but must be %s", rd, "90") - } -} - -func TestBytesPipeWrite(t *testing.T) { - buf := NewBytesPipe(nil) - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - if string(buf.buf[0]) != "1234567890" { - t.Fatalf("Buffer %s, must be %s", buf.buf, "1234567890") - } -} - -// Write and read in different speeds/chunk sizes and check valid data is read. -func TestBytesPipeWriteRandomChunks(t *testing.T) { - cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ - {100, 10, 1}, - {1000, 10, 5}, - {1000, 100, 0}, - {1000, 5, 6}, - {10000, 50, 25}, - } - - testMessage := []byte("this is a random string for testing") - // random slice sizes to read and write - writeChunks := []int{25, 35, 15, 20} - readChunks := []int{5, 45, 20, 25} - - for _, c := range cases { - // first pass: write directly to hash - hash := sha1.New() - for i := 0; i < c.iterations*c.writesPerLoop; i++ { - if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { - t.Fatal(err) - } - } - expected := hex.EncodeToString(hash.Sum(nil)) - - // write/read through buffer - buf := NewBytesPipe(nil) - hash.Reset() - for i := 0; i < c.iterations; i++ { - for w := 0; w < c.writesPerLoop; w++ { - buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) - } - for r := 0; r < c.readsPerLoop; r++ { - p := make([]byte, readChunks[(i*c.readsPerLoop+r)%len(readChunks)]) - n, _ := buf.Read(p) - hash.Write(p[:n]) - } - } - // read rest of the data from buffer - for i := 0; ; i++ { - p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) - n, _ := buf.Read(p) - if n == 0 { - break - } - hash.Write(p[:n]) - } - actual := hex.EncodeToString(hash.Sum(nil)) - - if expected != actual { - t.Fatalf("BytesPipe returned invalid data. 
Expected checksum %v, got %v", expected, actual) - } - - } -} - -func BenchmarkBytesPipeWrite(b *testing.B) { - for i := 0; i < b.N; i++ { - buf := NewBytesPipe(nil) - for j := 0; j < 1000; j++ { - buf.Write([]byte("pretty short line, because why not?")) - } - } -} - -func BenchmarkBytesPipeRead(b *testing.B) { - rd := make([]byte, 1024) - for i := 0; i < b.N; i++ { - b.StopTimer() - buf := NewBytesPipe(nil) - for j := 0; j < 1000; j++ { - buf.Write(make([]byte, 1024)) - } - b.StartTimer() - for j := 0; j < 1000; j++ { - if n, _ := buf.Read(rd); n != 1024 { - b.Fatalf("Wrong number of bytes: %d", n) - } - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go deleted file mode 100644 index 89688632..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package ioutils - -import "testing" - -func TestFprintfIfNotEmpty(t *testing.T) { - wc := NewWriteCounter(&NopWriter{}) - n, _ := FprintfIfNotEmpty(wc, "foo%s", "") - - if wc.Count != 0 || n != 0 { - t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) - } - - n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") - if wc.Count != 6 || n != 6 { - t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go deleted file mode 100644 index f231aa9d..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go +++ /dev/null @@ -1,226 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx += 1 - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - var rdr io.ReadSeeker - var rdrOffset int64 - - for i, rdr := range r.readers { - offsetTo, err := r.getOffsetToReader(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo > offset { - rdr = r.readers[i-1] - rdrOffset = offsetTo - offset - break - } - - if rdr == r.readers[len(r.readers)-1] { - rdrOffset = offsetTo + offset - break - } - } - - return rdr, rdrOffset, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { 
- if r.pos == nil { - r.pos = &pos{0, 0} - } - - bCap := int64(cap(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bCap) - if err != nil && err != io.EOF { - return -1, err - } - bCap -= readBytes - - if bCap == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. -func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go deleted file mode 100644 index de495b56..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -func TestMultiReadSeekerReadAll(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) - - b, err := ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - expected := "hello world 1hello world 2hello world 3" - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } - - size, err := mr.Seek(0, os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if size != expectedSize { - t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) - } - - // Reset the position and read again - pos, err := mr.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if pos != 0 { - t.Fatalf("expected position to be set to 0, got %d", pos) - } - - b, err = ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } -} - -func TestMultiReadSeekerReadEach(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") 
- s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - var totalBytes int64 - for i, s := range []*strings.Reader{s1, s2, s3} { - sLen := int64(s.Len()) - buf := make([]byte, s.Len()) - expected := []byte(fmt.Sprintf("%s %d", str, i+1)) - - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - - if !bytes.Equal(buf, expected) { - t.Fatalf("expected %q to be %q", string(buf), string(expected)) - } - - pos, err := mr.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("iteration: %d, error: %v", i+1, err) - } - - // check that the total bytes read is the current position of the seeker - totalBytes += sLen - if pos != totalBytes { - t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) - } - - // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well - newPos, err := mr.Seek(pos, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if newPos != pos { - t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) - } - } -} - -func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - buf := make([]byte, s1.Len()+3) - _, err := mr.Read(buf) - if err != nil { - t.Fatal(err) - } - - // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string - expected := "hello world 1hel" - if string(buf) != expected { - t.Fatalf("expected %s to be %s", string(buf), expected) - } -} - -func TestMultiReadSeekerNegativeSeek(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - s1Len := s1.Len() - s2Len := s2.Len() - s3Len := s3.Len() - - s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if s != int64(s1Len+s2Len) { - t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) - } - - buf := make([]byte, s3Len) - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - expected := fmt.Sprintf("%s %d", str, 3) - if string(buf) != fmt.Sprintf("%s %d", str, 3) { - t.Fatalf("expected %q to be %q", string(buf), expected) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index 54dd312b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,170 +0,0 @@ -package ioutils - -import ( - "crypto/sha256" - "encoding/hex" - "io" - "sync" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. 
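Returning to MultiReadSeeker, whose tests end just above: a minimal usage sketch modeled on TestMultiReadSeekerReadAll (os.SEEK_SET is the era-appropriate constant used throughout these files).

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Three readers presented as one logical ReadSeeker.
	mr := ioutils.MultiReadSeeker(
		strings.NewReader("hello world 1"),
		strings.NewReader("hello world 2"),
		strings.NewReader("hello world 3"),
	)

	b, _ := ioutil.ReadAll(mr)
	fmt.Printf("%s\n", b) // hello world 1hello world 2hello world 3

	// Seek operates over the summed lengths; rewind and read again.
	mr.Seek(0, os.SEEK_SET)
	b, _ = ioutil.ReadAll(mr)
	fmt.Printf("%s\n", b)
}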
-func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// bufReader allows the underlying reader to continue to produce -// output by pre-emptively reading from the wrapped reader. -// This is achieved by buffering this data in bufReader's -// expanding buffer. -type bufReader struct { - sync.Mutex - buf io.ReadWriter - reader io.Reader - err error - wait sync.Cond - drainBuf []byte -} - -// NewBufReader returns a new bufReader. -func NewBufReader(r io.Reader) io.ReadCloser { - reader := &bufReader{ - buf: NewBytesPipe(nil), - reader: r, - drainBuf: make([]byte, 1024), - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -// NewBufReaderWithDrainbufAndBuffer returns a BufReader with drainBuffer and buffer. -func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer io.ReadWriter) io.ReadCloser { - reader := &bufReader{ - buf: buffer, - drainBuf: drainBuffer, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func (r *bufReader) drain() { - for { - //Call to scheduler is made to yield from this goroutine. - //This avoids goroutine looping here when n=0,err=nil, fixes code hangs when run with GCC Go. - callSchedulerIfNecessary() - n, err := r.reader.Read(r.drainBuf) - r.Lock() - if err != nil { - r.err = err - } else { - if n == 0 { - // nothing written, no need to signal - r.Unlock() - continue - } - r.buf.Write(r.drainBuf[:n]) - } - r.wait.Signal() - r.Unlock() - if err != nil { - break - } - } -} - -func (r *bufReader) Read(p []byte) (n int, err error) { - r.Lock() - defer r.Unlock() - for { - n, err = r.buf.Read(p) - if n > 0 { - return n, err - } - if r.err != nil { - return 0, r.err - } - r.wait.Wait() - } -} - -// Close closes the bufReader -func (r *bufReader) Close() error { - closer, ok := r.reader.(io.ReadCloser) - if !ok { - return nil - } - return closer.Close() -} - -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps a io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. 
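Of the helpers in this file, HashData is the easiest to show in isolation; the expected digest below is the value pinned by the package's TestHashData.

package main

import (
	"fmt"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	// HashData drains the reader and returns a "sha256:"-prefixed digest.
	sum, err := ioutils.HashData(strings.NewReader("hash-me"))
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
	// sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa
}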
-func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go deleted file mode 100644 index 5c26a2a1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" - "time" -) - -// Implement io.Reader -type errorReader struct{} - -func (r *errorReader) Read(p []byte) (int, error) { - return 0, fmt.Errorf("Error reader always fail.") -} - -func TestReadCloserWrapperClose(t *testing.T) { - reader := strings.NewReader("A string reader") - wrapper := NewReadCloserWrapper(reader, func() error { - return fmt.Errorf("This will be called when closing") - }) - err := wrapper.Close() - if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { - t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") - } -} - -func TestReaderErrWrapperReadOnError(t *testing.T) { - called := false - reader := &errorReader{} - wrapper := NewReaderErrWrapper(reader, func() { - called = true - }) - _, err := wrapper.Read([]byte{}) - if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { - t.Fatalf("readErrWrapper should returned an error") - } - if !called { - t.Fatalf("readErrWrapper should have call the anonymous function on failure") - } -} - -func TestReaderErrWrapperRead(t *testing.T) { - reader := strings.NewReader("a string reader.") - wrapper := NewReaderErrWrapper(reader, func() { - t.Fatalf("readErrWrapper should not have called the anonymous function") - }) - // Read 20 byte (should be ok with the string above) - num, err := wrapper.Read(make([]byte, 20)) - if err != nil { - t.Fatal(err) - } - if num != 16 { - t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) - } -} - -func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) { - reader, writer := io.Pipe() - - drainBuffer := make([]byte, 1024) - buffer := NewBytesPipe(nil) - bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, buffer) - - // Write everything down to a Pipe - // Usually, a pipe should block but because of the buffered reader, - // the writes will go through - done := make(chan bool) - go func() { - writer.Write([]byte("hello world")) - writer.Close() - done <- true - }() - - // Drain the reader *after* everything has been written, just to verify - // it is indeed buffering - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("timeout") - } - - output, err := ioutil.ReadAll(bufreader) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(output, []byte("hello world")) { - t.Error(string(output)) - } -} - -func TestBufReader(t *testing.T) { - reader, writer := io.Pipe() - bufreader := NewBufReader(reader) - - // Write everything down to a Pipe - // Usually, a pipe should block but because of the buffered reader, - // the writes will go through - done := make(chan bool) - go func() { - writer.Write([]byte("hello world")) - writer.Close() - done <- true - }() - - // Drain the reader *after* everything has been written, just to verify - // it is indeed buffering - <-done - output, err := 
ioutil.ReadAll(bufreader) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(output, []byte("hello world")) { - t.Error(string(output)) - } -} - -func TestBufReaderCloseWithNonReaderCloser(t *testing.T) { - reader := strings.NewReader("buffer") - bufreader := NewBufReader(reader) - - if err := bufreader.Close(); err != nil { - t.Fatal(err) - } - -} - -// implements io.ReadCloser -type simpleReaderCloser struct { - err error -} - -func (r *simpleReaderCloser) Read(p []byte) (n int, err error) { - return 0, r.err -} - -func (r *simpleReaderCloser) Close() error { - r.err = io.EOF - return nil -} - -func TestBufReaderCloseWithReaderCloser(t *testing.T) { - reader := &simpleReaderCloser{} - bufreader := NewBufReader(reader) - - err := bufreader.Close() - if err != nil { - t.Fatal(err) - } - -} - -func TestHashData(t *testing.T) { - reader := strings.NewReader("hash-me") - actual, err := HashData(reader) - if err != nil { - t.Fatal(err) - } - expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" - if actual != expected { - t.Fatalf("Expecting %s, got %s", expected, actual) - } -} - -type repeatedReader struct { - readCount int - maxReads int - data []byte -} - -func newRepeatedReader(max int, data []byte) *repeatedReader { - return &repeatedReader{0, max, data} -} - -func (r *repeatedReader) Read(p []byte) (int, error) { - if r.readCount >= r.maxReads { - return 0, io.EOF - } - r.readCount++ - n := copy(p, r.data) - return n, nil -} - -func testWithData(data []byte, reads int) { - reader := newRepeatedReader(reads, data) - bufReader := NewBufReader(reader) - io.Copy(ioutil.Discard, bufReader) -} - -func Benchmark1M10BytesReads(b *testing.B) { - reads := 1000000 - readSize := int64(10) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} - -func Benchmark1M1024BytesReads(b *testing.B) { - reads := 1000000 - readSize := int64(1024) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} - -func Benchmark10k32KBytesReads(b *testing.B) { - reads := 10000 - readSize := int64(32 * 1024) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go deleted file mode 100644 index 3c88f29e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !gccgo - -package ioutils - -func callSchedulerIfNecessary() { -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go deleted file mode 100644 index c11d02b9..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build gccgo - -package ioutils - -import ( - "runtime" -) - -func callSchedulerIfNecessary() { - //allow or force Go scheduler to switch context, without explicitly - //forcing this will make it hang when using gccgo implementation - runtime.Gosched() -} 
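The gccgo variant above exists because bufReader.drain can loop without ever blocking when a Read returns (0, nil); without an explicit yield, gccgo's cooperative scheduler may never run other goroutines. The pattern in miniature (a standalone sketch, not part of the package):

package main

import (
	"io"
	"runtime"
	"strings"
)

// drainAll mirrors the shape of bufReader.drain: a loop that may spin
// without blocking has to yield explicitly to make progress under gccgo,
// while under the gc toolchain the yield is essentially free.
func drainAll(r io.Reader) {
	buf := make([]byte, 1024)
	for {
		runtime.Gosched()
		if _, err := r.Read(buf); err != nil {
			return // io.EOF or a real error ends the drain
		}
	}
}

func main() {
	drainAll(strings.NewReader("some buffered data"))
}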
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go deleted file mode 100644 index 1539ad21..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. -func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index 72c0bc59..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package ioutils - -import ( - "io/ioutil" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 25095474..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,47 +0,0 @@ -package ioutils - -import ( - "io" - "net/http" - "sync" -) - -type WriteFlusher struct { - sync.Mutex - w io.Writer - flusher http.Flusher - flushed bool -} - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.Lock() - defer wf.Unlock() - n, err = wf.w.Write(b) - wf.flushed = true - wf.flusher.Flush() - return n, err -} - -// Flush the stream immediately. 
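WriteFlusher is typically wrapped around an http.ResponseWriter so progress output reaches the client as soon as it is written; when the underlying writer is not an http.Flusher, the NopFlusher fallback makes every Flush a no-op. A sketch; the /events route and port are illustrative only.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		// net/http's ResponseWriter implements http.Flusher, so each
		// Write below is pushed to the client immediately.
		wf := ioutils.NewWriteFlusher(w)
		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "tick %d\n", i)
			time.Sleep(time.Second)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}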
-func (wf *WriteFlusher) Flush() { - wf.Lock() - defer wf.Unlock() - wf.flushed = true - wf.flusher.Flush() -} - -func (wf *WriteFlusher) Flushed() bool { - wf.Lock() - defer wf.Unlock() - return wf.flushed -} - -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var flusher http.Flusher - if f, ok := w.(http.Flusher); ok { - flusher = f - } else { - flusher = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: flusher} -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 43fdc44e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,60 +0,0 @@ -package ioutils - -import "io" - -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -type NopFlusher struct{} - -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// Wrap a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go deleted file mode 100644 index 564b1cd4..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package ioutils - -import ( - "bytes" - "strings" - "testing" -) - -func TestWriteCloserWrapperClose(t *testing.T) { - called := false - writer := bytes.NewBuffer([]byte{}) - wrapper := NewWriteCloserWrapper(writer, func() error { - called = true - return nil - }) - if err := wrapper.Close(); err != nil { - t.Fatal(err) - } - if !called { - t.Fatalf("writeCloserWrapper should have call the anonymous function.") - } -} - -func TestNopWriteCloser(t *testing.T) { - writer := bytes.NewBuffer([]byte{}) - wrapper := NopWriteCloser(writer) - if err := wrapper.Close(); err != nil { - t.Fatal("NopWriteCloser always return nil on Close.") - } - -} - -func TestNopWriter(t *testing.T) { - nw := &NopWriter{} - l, err := nw.Write([]byte{'c'}) - if err != nil { - t.Fatal(err) - } - if l != 1 { - t.Fatalf("Expected 1 got %d", l) - } -} - -func TestWriteCounter(t *testing.T) { - dummy1 := "This is a dummy string." - dummy2 := "This is another dummy string." 
- totalLength := int64(len(dummy1) + len(dummy2)) - - reader1 := strings.NewReader(dummy1) - reader2 := strings.NewReader(dummy2) - - var buffer bytes.Buffer - wc := NewWriteCounter(&buffer) - - reader1.WriteTo(wc) - reader2.WriteTo(wc) - - if wc.Count != totalLength { - t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) - } - - if buffer.String() != dummy1+dummy2 { - t.Error("Wrong message written") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath_test.go deleted file mode 100644 index 01865eff..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package longpath - -import ( - "strings" - "testing" -) - -func TestStandardLongPath(t *testing.T) { - c := `C:\simple\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\C:\simple\path`) { - t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) - } -} - -func TestUNCLongPath(t *testing.T) { - c := `\\server\share\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { - t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go deleted file mode 100644 index a604a9e1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package parsers provides helper functions to parse and validate different type -// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel -// operating system versions. -package parsers - -import ( - "fmt" - "net" - "net/url" - "path" - "runtime" - "strconv" - "strings" -) - -// ParseDockerDaemonHost parses the specified address and returns an address that will be used as the host. 
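longpath.AddPrefix is idempotent and UNC-aware, and the deleted tests above pin down both behaviors. A short sketch (vendored import path assumed):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)

func main() {
	fmt.Println(longpath.AddPrefix(`C:\simple\path`))      // \\?\C:\simple\path
	fmt.Println(longpath.AddPrefix(`\\server\share\path`)) // \\?\UNC\server\share\path
	// Already-prefixed paths are returned unchanged.
	fmt.Println(longpath.AddPrefix(`\\?\C:\simple\path`)) // \\?\C:\simple\path
}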
-// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr -// defaultUnixAddr must be a absolute file path (no `unix://` prefix) -// defaultTCPAddr must be the full `tcp://host:port` form -func ParseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { - addr = strings.TrimSpace(addr) - if addr == "" { - if defaultAddr == defaultTLSHost { - return defaultTLSHost, nil - } - if runtime.GOOS != "windows" { - return fmt.Sprintf("unix://%s", defaultUnixAddr), nil - } - return defaultTCPAddr, nil - } - addrParts := strings.Split(addr, "://") - if len(addrParts) == 1 { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], defaultTCPAddr) - case "unix": - return ParseUnixAddr(addrParts[1], defaultUnixAddr) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// ParseUnixAddr parses and validates that the specified address is a valid UNIX -// socket address. It returns a formatted UNIX socket address, either using the -// address parsed from addr, or the contents of defaultAddr if addr is a blank -// string. -func ParseUnixAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "unix://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("unix://%s", addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if net.ParseIP(host).To4() == nil && strings.Contains(host, ":") { - // This is either an ipv6 address - host = "[" + host + "]" - } - return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil -} - -// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest -// The tag can be confusing because of a port in a repository name. 
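ParseDockerDaemonHost, ParseUnixAddr and ParseTCPAddr normalize DOCKER_HOST-style strings against a set of defaults. A sketch of the defaulting behavior on a non-Windows host, using the same default addresses the deleted tests use:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
)

func main() {
	for _, in := range []string{"", ":6666", "tcp://", "unix://"} {
		// Arguments: default TCP addr, default TLS host, default unix
		// socket path, resolved default addr, then the input.
		addr, err := parsers.ParseDockerDaemonHost(
			"tcp://localhost:2375", "tcp://localhost:2376",
			"/var/run/docker.sock", "", in)
		fmt.Printf("%-9q -> %q %v\n", in, addr, err)
	}
	// "" -> unix:///var/run/docker.sock, ":6666" -> tcp://localhost:6666,
	// "tcp://" -> tcp://localhost:2375, "unix://" -> unix:///var/run/docker.sock
}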
-// Ex: localhost.localdomain:5000/samalba/hipache:latest -// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb -func ParseRepositoryTag(repos string) (string, string) { - n := strings.Index(repos, "@") - if n >= 0 { - parts := strings.Split(repos, "@") - return parts[0], parts[1] - } - n = strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" -} - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) -func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get an HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ParseUintList parses and validates the specified string as the value -// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be -// one of the formats below. Note that duplicates are actually allowed in the -// input string. It returns a `map[int]bool` with available elements from `val` -// set to `true`. 
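ParseRepositoryTag's port-versus-tag rule and ParsePortRange are easiest to see in use. A sketch, under the same vendored-path assumption as the earlier examples:

package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
)

func main() {
	// The registry port is not mistaken for a tag separator, because a
	// candidate tag containing "/" is rejected.
	repo, tag := parsers.ParseRepositoryTag("localhost:5000/foo/bar:latest")
	fmt.Println(repo, tag) // localhost:5000/foo/bar latest

	start, end, err := parsers.ParsePortRange("8000-8080")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(start, end) // 8000 8080
}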
-// Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 -func ParseUintList(val string) (map[int]bool, error) { - if val == "" { - return map[int]bool{}, nil - } - - availableInts := make(map[int]bool) - split := strings.Split(val, ",") - errInvalidFormat := fmt.Errorf("invalid format: %s", val) - - for _, r := range split { - if !strings.Contains(r, "-") { - v, err := strconv.Atoi(r) - if err != nil { - return nil, errInvalidFormat - } - availableInts[v] = true - } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) - if err != nil { - return nil, errInvalidFormat - } - max, err := strconv.Atoi(split[1]) - if err != nil { - return nil, errInvalidFormat - } - if max < min { - return nil, errInvalidFormat - } - for i := min; i <= max; i++ { - availableInts[i] = true - } - } - } - return availableInts, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go deleted file mode 100644 index 47b45281..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go +++ /dev/null @@ -1,295 +0,0 @@ -package parsers - -import ( - "reflect" - "runtime" - "strings" - "testing" -) - -func TestParseDockerDaemonHost(t *testing.T) { - var ( - defaultHTTPHost = "tcp://localhost:2375" - defaultHTTPSHost = "tcp://localhost:2376" - defaultUnix = "/var/run/docker.sock" - defaultHOST = "unix:///var/run/docker.sock" - ) - if runtime.GOOS == "windows" { - defaultHOST = defaultHTTPHost - } - invalids := map[string]string{ - "0.0.0.0": "Invalid bind address format: 0.0.0.0", - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", - "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", - "tcp": "Invalid bind address format: tcp", - "unix": "Invalid bind address format: unix", - "fd": "Invalid bind address format: fd", - } - valids := map[string]string{ - "0.0.0.1:": "tcp://0.0.0.1:2375", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - "[::1]:": "tcp://[::1]:2375", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - ":6666": "tcp://localhost:6666", - ":6666/path": "tcp://localhost:6666/path", - "": defaultHOST, - " ": defaultHOST, - " ": defaultHOST, - "tcp://": defaultHTTPHost, - "tcp://:7777": "tcp://localhost:7777", - "tcp://:7777/path": "tcp://localhost:7777/path", - " tcp://:7777/path ": "tcp://localhost:7777/path", - "unix:///run/docker.sock": "unix:///run/docker.sock", - "unix://": "unix:///var/run/docker.sock", - "fd://": "fd://", - "fd://something": "fd://something", - "localhost:": "tcp://localhost:2375", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", invalidAddr); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v return, got %s and addr 
%v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", validAddr); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseTCP(t *testing.T) { - var ( - defaultHTTPHost = "tcp://127.0.0.1:2376" - ) - invalids := map[string]string{ - "0.0.0.0": "Invalid bind address format: 0.0.0.0", - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", - } - valids := map[string]string{ - "": defaultHTTPHost, - "tcp://": defaultHTTPHost, - "0.0.0.1:": "tcp://0.0.0.1:2376", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - ":6666": "tcp://127.0.0.1:6666", - ":6666/path": "tcp://127.0.0.1:6666/path", - "tcp://:7777": "tcp://127.0.0.1:7777", - "tcp://:7777/path": "tcp://127.0.0.1:7777/path", - "[::1]:": "tcp://[::1]:2376", - "[::1]:5555": "tcp://[::1]:5555", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", - "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - "localhost:": "tcp://localhost:2376", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseInvalidUnixAddrInvalid(t *testing.T) { - if _, err := ParseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if v, err := ParseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { - t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") - } -} - -func TestParseRepositoryTag(t *testing.T) { - if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", 
"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } -} - -func TestParseKeyValueOpt(t *testing.T) { - invalids := map[string]string{ - "": "Unable to parse key/value option: ", - "key": "Unable to parse key/value option: key", - } - for invalid, expectedError := range invalids { - if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) - } - } - valids := map[string][]string{ - "key=value": {"key", "value"}, - " key = value ": {"key", "value"}, - "key=value1=value2": {"key", "value1=value2"}, - " key = value1 = value2 ": {"key", "value1 = value2"}, - } - for valid, expectedKeyValue := range valids { - key, value, err := ParseKeyValueOpt(valid) - if err != nil { - t.Fatal(err) - } - if key != expectedKeyValue[0] || value != expectedKeyValue[1] { - t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) - } - } -} - -func TestParsePortRange(t *testing.T) { - if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { - t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) - } -} - -func TestParsePortRangeEmpty(t *testing.T) { - if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." 
{ - t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) - } -} - -func TestParsePortRangeWithNoRange(t *testing.T) { - start, end, err := ParsePortRange("8080") - if err != nil { - t.Fatal(err) - } - if start != 8080 || end != 8080 { - t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) - } -} - -func TestParsePortRangeIncorrectRange(t *testing.T) { - if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectEndRange(t *testing.T) { - if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectStartRange(t *testing.T) { - if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParseLink(t *testing.T) { - name, alias, err := ParseLink("name:alias") - if err != nil { - t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "alias" { - t.Fatalf("Link alias should have been alias, got %s instead", alias) - } - // short format definition - name, alias, err = ParseLink("name") - if err != nil { - t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "name" { - t.Fatalf("Link alias should have been name, got %s instead", alias) - } - // empty string link definition is not allowed - if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { - t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) - } - // more than two colons are not allowed - if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { - t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) - } -} - -func TestParseUintList(t *testing.T) { - valids := map[string]map[int]bool{ - "": {}, - "7": {7: true}, - "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, - "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, - "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, - "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, - "03,1-3": {1: true, 2: true, 3: true}, - "3,2,1": {1: true, 2: true, 3: true}, - "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, - } - for k, v := range valids { - out, err := ParseUintList(k) - if err != nil { - t.Fatalf("Expected not to fail, got %v", err) - } - if 
!reflect.DeepEqual(out, v) { - t.Fatalf("Expected %v, got %v", v, out) - } - } - - invalids := []string{ - "this", - "1--", - "1-10,,10", - "10-1", - "-1", - "-1,0", - } - for _, v := range invalids { - if out, err := ParseUintList(v); err == nil { - t.Fatalf("Expected failure with %s but got %v", v, out) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 515fb4d0..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" -) - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool *BufioReaderPool - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - } - return &BufioReaderPool{pool: pool} -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. 
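pools.Copy, removed above, is a drop-in for io.Copy that borrows a pooled 32K bufio.Reader instead of allocating one per call. A sketch:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
)

func main() {
	// Same contract as io.Copy, but the 32K bufio.Reader comes from a
	// sync.Pool and goes back to it when the copy finishes.
	n, err := pools.Copy(ioutil.Discard, strings.NewReader("payload"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // 7
}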
-func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - } - return &BufioWriterPool{pool: pool} -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go deleted file mode 100644 index 78689800..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package pools - -import ( - "bufio" - "bytes" - "io" - "strings" - "testing" -) - -func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - reader := BufioReader32KPool.Get(nil) - if reader == nil { - t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") - } -} - -func TestBufioReaderPoolPutAndGet(t *testing.T) { - sr := bufio.NewReader(strings.NewReader("foobar")) - reader := BufioReader32KPool.Get(sr) - if reader == nil { - t.Fatalf("BufioReaderPool should not return a nil reader.") - } - // verify the first 3 byte - buf1 := make([]byte, 3) - _, err := reader.Read(buf1) - if err != nil { - t.Fatal(err) - } - if actual := string(buf1); actual != "foo" { - t.Fatalf("The first letter should have been 'foo' but was %v", actual) - } - BufioReader32KPool.Put(reader) - // Try to read the next 3 bytes - _, err = sr.Read(make([]byte, 3)) - if err == nil || err != io.EOF { - t.Fatalf("The buffer should have been empty, issue an EOF error.") - } -} - -type simpleReaderCloser struct { - io.Reader - closed bool -} - -func (r *simpleReaderCloser) Close() error { - r.closed = true - return nil -} - -func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { - br := bufio.NewReader(strings.NewReader("")) - sr := &simpleReaderCloser{ - Reader: strings.NewReader("foobar"), - closed: false, - } - reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) - if reader == nil { - t.Fatalf("NewReadCloserWrapper should not return a nil reader.") - } - // Verify the content of reader - buf := make([]byte, 3) - _, err := reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "foo" { - t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) - } - reader.Close() - // Read 3 more bytes "bar" - _, err = reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "bar" { - t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) - } - if !sr.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} - -func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) 
{ - writer := BufioWriter32KPool.Get(nil) - if writer == nil { - t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") - } -} - -func TestBufioWriterPoolPutAndGet(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - writer := BufioWriter32KPool.Get(bw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - // Make sure we Flush all the way ? - writer.Flush() - bw.Flush() - if len(buf.Bytes()) != 6 { - t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) - } - // Reset the buffer - buf.Reset() - BufioWriter32KPool.Put(writer) - // Try to write something - written, err = writer.Write([]byte("barfoo")) - if err != nil { - t.Fatal(err) - } - // If we now try to flush it, it should panic (the writer is nil) - // recover it - defer func() { - if r := recover(); r == nil { - t.Fatal("Trying to flush the writter should have 'paniced', did not.") - } - }() - writer.Flush() -} - -type simpleWriterCloser struct { - io.Writer - closed bool -} - -func (r *simpleWriterCloser) Close() error { - r.closed = true - return nil -} - -func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - sw := &simpleWriterCloser{ - Writer: new(bytes.Buffer), - closed: false, - } - bw.Flush() - writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - writer.Close() - if !sw.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go deleted file mode 100644 index dd52b908..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. 
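promise.Go runs the supplied function in a goroutine and hands back a buffered error channel, so the send never blocks even if the caller receives late. A sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
)

func main() {
	// The channel has capacity 1, so the goroutine can finish and
	// deliver its result before anyone is waiting on it.
	errCh := promise.Go(func() error {
		return errors.New("job failed")
	})
	fmt.Println(<-errCh) // job failed
}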
-func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go deleted file mode 100644 index 88d88d41..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package stdcopy - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "strings" - "testing" -) - -func TestNewStdWriter(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - if writer == nil { - t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") - } -} - -func TestWriteWithUnitializedStdWriter(t *testing.T) { - writer := StdWriter{ - Writer: nil, - prefix: Stdout, - sizeBuf: make([]byte, 4), - } - n, err := writer.Write([]byte("Something here")) - if n != 0 || err == nil { - t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") - } -} - -func TestWriteWithNilBytes(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - n, err := writer.Write(nil) - if err != nil { - t.Fatalf("Shouldn't have fail when given no data") - } - if n > 0 { - t.Fatalf("Write should have written 0 byte, but has written %d", n) - } -} - -func TestWrite(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test StdWrite.Write") - n, err := writer.Write(data) - if err != nil { - t.Fatalf("Error while writing with StdWrite") - } - if n != len(data) { - t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) - } -} - -type errWriter struct { - n int - err error -} - -func (f *errWriter) Write(buf []byte) (int, error) { - return f.n, f.err -} - -func TestWriteWithWriterError(t *testing.T) { - expectedError := errors.New("expected") - expectedReturnedBytes := 10 - writer := NewStdWriter(&errWriter{ - n: stdWriterPrefixLen + expectedReturnedBytes, - err: expectedError}, Stdout) - data := []byte("This won't get written, sigh") - n, err := writer.Write(data) - if err != expectedError { - t.Fatalf("Didn't get expected error.") - } - if n != expectedReturnedBytes { - t.Fatalf("Didn't get expected writen bytes %d, got %d.", - expectedReturnedBytes, n) - } -} - -func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { - writer := NewStdWriter(&errWriter{n: -1}, Stdout) - data := []byte("This won't get written, sigh") - actual, _ := writer.Write(data) - if actual != 0 { - t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) - } -} - -func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { - buffer = new(bytes.Buffer) - dstOut := NewStdWriter(buffer, Stdout) - _, err = dstOut.Write(stdOutBytes) - if err != nil { - return - } - dstErr := NewStdWriter(buffer, Stderr) - _, err = dstErr.Write(stdErrBytes) - return -} - -func TestStdCopyWriteAndRead(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) - if err != nil { - t.Fatal(err) - } - expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) - if written != int64(expectedTotalWritten) { - t.Fatalf("Expected to have total of %d bytes written, got %d", 
expectedTotalWritten, written) - } -} - -type customReader struct { - n int - err error - totalCalls int - correctCalls int - src *bytes.Buffer -} - -func (f *customReader) Read(buf []byte) (int, error) { - f.totalCalls++ - if f.totalCalls <= f.correctCalls { - return f.src.Read(buf) - } - return f.n, f.err -} - -func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { - expectedError := errors.New("error") - reader := &customReader{ - err: expectedError} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { - expectedError := errors.New("error") - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: expectedError, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyDetectsCorruptedFrame(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: io.EOF, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != startingBufLen { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != nil { - t.Fatal("Didn't get nil error") - } -} - -func TestStdCopyWithInvalidInputHeader(t *testing.T) { - dstOut := NewStdWriter(ioutil.Discard, Stdout) - dstErr := NewStdWriter(ioutil.Discard, Stderr) - src := strings.NewReader("Invalid input") - _, err := StdCopy(dstOut, dstErr, src) - if err == nil { - t.Fatal("StdCopy with invalid input header should fail.") - } -} - -func TestStdCopyWithCorruptedPrefix(t *testing.T) { - data := []byte{0x01, 0x02, 0x03} - src := bytes.NewReader(data) - written, err := StdCopy(nil, nil, src) - if err != nil { - t.Fatalf("StdCopy should not return an error with corrupted prefix.") - } - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } -} - -func TestStdCopyReturnsWriteErrors(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - expectedError := errors.New("expected") - - dstOut := &errWriter{err: expectedError} - - written, err := StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error, got %v", err) - } -} - -func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - dstOut := &errWriter{n: startingBufLen - 10} - - written, err := 
StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) - } - if err != io.ErrShortWrite { - t.Fatalf("Didn't get expected io.ErrShortWrite error") - } -} - -func BenchmarkWrite(b *testing.B) { - w := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test line for testing stdwriter performance\n") - data = bytes.Repeat(data, 100) - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := w.Write(data); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go deleted file mode 100644 index 31ed9ff1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go +++ /dev/null @@ -1,31 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixMinTime.Add((1<<33 - 1) * time.Second) - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_test.go deleted file mode 100644 index f65a4b80..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" -) - -// prepareTempFile creates a temporary file in a temporary directory. -func prepareTempFile(t *testing.T) (string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - return file, dir -} - -// TestChtimes tests Chtimes on a tempfile. 
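The stdcopy package multiplexes stdout and stderr frames onto one stream (NewStdWriter) and splits them apart again (StdCopy), mirroring getSrcBuffer in the deleted tests. A sketch, assuming the Stdout/Stderr stream constants are exported as the test code suggests:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex two logical streams into one framed buffer...
	var mux bytes.Buffer
	stdcopy.NewStdWriter(&mux, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&mux, stdcopy.Stderr).Write([]byte("to stderr\n"))

	// ...then demultiplex them back into separate writers.
	var out, errBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errBuf, &mux); err != nil {
		log.Fatal(err)
	}
	fmt.Print(out.String())    // to stdout
	fmt.Print(errBuf.String()) // to stderr
}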
Test only mTime, because aTime is OS dependent -func TestChtimes(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) - afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, f.ModTime()) - } - - // Test aTime after Unix max time and mTime set to Unix max time - Chtimes(file, afterUnixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, f.ModTime()) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixMaxTime, afterUnixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix_test.go deleted file mode 100644 index 6998bbef..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "os" - "syscall" - "testing" - "time" -) - -// TestChtimes tests Chtimes access time on a tempfile on Linux -func TestChtimesLinux(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) - afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - 
t.Fatal(err) - } - - stat := f.Sys().(*syscall.Stat_t) - aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } - - // Test aTime after Unix max time and mTime set to Unix max time - Chtimes(file, afterUnixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixMaxTime, afterUnixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows_test.go deleted file mode 100644 index f09c4028..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "syscall" - "testing" - "time" -) - -// TestChtimes tests Chtimes access time on a tempfile on Windows -func TestChtimesWindows(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) - afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := 
os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } - - // Test aTime after Unix max time and mTime set to Unix max time - Chtimes(file, afterUnixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixMaxTime, afterUnixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixMaxTime { - t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 28831898..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. 
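system.Chtimes clamps timestamps outside the 33-bit Unix range to the epoch before calling os.Chtimes, exactly the cases the deleted tests walk through. A sketch of the pre-epoch case:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	f, err := ioutil.TempFile("", "chtimes")
	if err != nil {
		log.Fatal(err)
	}
	f.Close()
	defer os.Remove(f.Name())

	// A pre-epoch access time would make os.Chtimes behavior undefined,
	// so Chtimes clamps it to time.Unix(0, 0) before the call.
	before := time.Unix(0, 0).Add(-100 * time.Second)
	if err := system.Chtimes(f.Name(), before, time.Unix(0, 0)); err != nil {
		log.Fatal(err)
	}
	fi, err := os.Stat(f.Name())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fi.ModTime().Unix()) // 0
}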
- ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 04e2de78..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. -func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index c14feb84..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "os" - "path/filepath" -) - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. 
-func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 16823d55..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "syscall" -) - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, perm os.FileMode) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go deleted file mode 100644 index bd23c4d5..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
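system.IsAbs exists so that a path like /windows/system32 (no drive letter) still counts as absolute on Windows, matching how the daemon validates WORKDIR; on Unix it is plain filepath.IsAbs. A sketch:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	// True on both platforms: on Unix via filepath.IsAbs, on Windows via
	// the relaxed rule that a leading path separator is enough.
	fmt.Println(system.IsAbs("/windows/system32")) // true
}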
-// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go deleted file mode 100644 index 6bac492e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package system - -import ( - "os" - "testing" -) - -// TestLstat tests Lstat for existing and non-existing files -func TestLstat(t *testing.T) { - file, invalid, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - statFile, err := Lstat(file) - if err != nil { - t.Fatal(err) - } - if statFile == nil { - t.Fatal("returned empty stat for existing file") - } - - statInvalid, err := Lstat(invalid) - if err == nil { - t.Fatal("did not return error for non-existing file") - } - if statInvalid != nil { - t.Fatal("returned non-nil stat for non-existing file") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_unix_test.go deleted file mode 100644 index 062cf53b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_unix_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "os" - "testing" -) - -// TestLstat tests Lstat for existing and non-existing files -func TestLstat(t *testing.T) { - file, invalid, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - statFile, err := Lstat(file) - if err != nil { - t.Fatal(err) - } - if statFile == nil { - t.Fatal("returned empty stat for existing file") - } - - statInvalid, err := Lstat(invalid) - if err == nil { - t.Fatal("did not return error for non-existing file") - } - if statInvalid != nil { - t.Fatal("returned non-nil stat for non-existing file") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 49e87eb4..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that's not strictly necessary when already in an OS-specific module.
-func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index 9d83304f..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,66 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given a io.Reader to the file. -// -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. 
- if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go deleted file mode 100644 index 87830ccb..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package system - -import ( - "strings" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" -) - -// TestMemInfo tests parseMemInfo with a static meminfo string -func TestMemInfo(t *testing.T) { - const input = ` - MemTotal: 1 kB - MemFree: 2 kB - SwapTotal: 3 kB - SwapFree: 4 kB - Malformed1: - Malformed2: 1 - Malformed3: 2 MB - Malformed4: X kB - ` - meminfo, err := parseMemInfo(strings.NewReader(input)) - if err != nil { - t.Fatal(err) - } - if meminfo.MemTotal != 1*units.KiB { - t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) - } - if meminfo.MemFree != 2*units.KiB { - t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) - } - if meminfo.SwapTotal != 3*units.KiB { - t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) - } - if meminfo.SwapFree != 4*units.KiB { - t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unix_test.go deleted file mode 100644 index 681bbfc1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unix_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "strings" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" -) - -// TestMemInfo tests parseMemInfo with a static meminfo string -func TestMemInfo(t *testing.T) { - const input = ` - MemTotal: 1 kB - MemFree: 2 kB - SwapTotal: 3 kB - SwapFree: 4 kB - Malformed1: - Malformed2: 1 - Malformed3: 2 MB - Malformed4: X kB - ` - meminfo, err := parseMemInfo(strings.NewReader(input)) - if err != nil { - t.Fatal(err) - } - if meminfo.MemTotal != 1*units.KiB { - t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) - } - if meminfo.MemFree != 2*units.KiB { - t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) - } - if meminfo.SwapTotal != 3*units.KiB { - t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) - } - if meminfo.SwapFree != 4*units.KiB { - t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 82ddd30c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!windows - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. 
-func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index d4664259..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index 73958182..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c02..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. 
-func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go deleted file mode 100644 index 087034c5..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// GetLastModification returns file's last modification time. -func (s StatT) GetLastModification() syscall.Timespec { - return s.Mtim() -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go deleted file mode 100644 index d0fb6f15..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.StatT type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 8b1eded1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.StatT type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT exists only on linux, and loads a system.StatT from a -// syscall.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file.
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go deleted file mode 100644 index 45341292..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package system - -import ( - "os" - "syscall" - "testing" -) - -// TestFromStatT tests fromStatT for a tempfile -func TestFromStatT(t *testing.T) { - file, _, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - stat := &syscall.Stat_t{} - err := syscall.Lstat(file, stat) - - s, err := fromStatT(stat) - if err != nil { - t.Fatal(err) - } - - if stat.Mode != s.Mode() { - t.Fatal("got invalid mode") - } - if stat.Uid != s.Uid() { - t.Fatal("got invalid uid") - } - if stat.Gid != s.Gid() { - t.Fatal("got invalid gid") - } - if stat.Rdev != s.Rdev() { - t.Fatal("got invalid rdev") - } - if stat.Mtim != s.Mtim() { - t.Fatal("got invalid mtim") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unix_test.go deleted file mode 100644 index dee8d30a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unix_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "os" - "syscall" - "testing" -) - -// TestFromStatT tests fromStatT for a tempfile -func TestFromStatT(t *testing.T) { - file, _, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - stat := &syscall.Stat_t{} - err := syscall.Lstat(file, stat) - - s, err := fromStatT(stat) - if err != nil { - t.Fatal(err) - } - - if stat.Mode != s.Mode() { - t.Fatal("got invalid mode") - } - if stat.Uid != s.UID() { - t.Fatal("got invalid uid") - } - if stat.Gid != s.GID() { - t.Fatal("got invalid gid") - } - if stat.Rdev != s.Rdev() { - t.Fatal("got invalid rdev") - } - if stat.Mtim != s.Mtim() { - t.Fatal("got invalid mtim") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 39490c62..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. 
-func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go deleted file mode 100644 index 50054765..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux freebsd - -package system - -import "syscall" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) { - syscall.Unmount(dest, 0) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index 3a3a55b2..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) { -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index c670fcd7..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Umask sets current process's file mode creation mask to newmask -// and return oldmask. -func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index 13f1de17..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go deleted file mode 100644 index 0a161975..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -package system - -import "syscall" - -// LUtimesNano is not supported by darwin platform. 
-func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go deleted file mode 100644 index e2eac3b5..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index 007bfa8c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - atFdCwd := -100 - atSymLinkNoFollow := 0x100 - - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go deleted file mode 100644 index 350cce1e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -// prepareFiles creates files for testing in the temp directory -func prepareFiles(t *testing.T) (string, string, string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - - invalid := filepath.Join(dir, "doesnt-exist") - - symlink := filepath.Join(dir, "symlink") - if err := os.Symlink(file, symlink); err != nil { - t.Fatal(err) - } - - return file, invalid, symlink, dir -} - -func TestLUtimesNano(t *testing.T) { - file, invalid, symlink, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - before, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - ts :=
[]syscall.Timespec{{0, 0}, {0, 0}} - if err := LUtimesNano(symlink, ts); err != nil { - t.Fatal(err) - } - - symlinkInfo, err := os.Lstat(symlink) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { - t.Fatal("The modification time of the symlink should be different") - } - - fileInfo, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() != fileInfo.ModTime().Unix() { - t.Fatal("The modification time of the file should be same") - } - - if err := LUtimesNano(invalid, ts); err == nil { - t.Fatal("Doesn't return an error on a non-existing file") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unix_test.go deleted file mode 100644 index 1ee0d099..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unix_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -// prepareFiles creates files for testing in the temp directory -func prepareFiles(t *testing.T) (string, string, string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - - invalid := filepath.Join(dir, "doesnt-exist") - - symlink := filepath.Join(dir, "symlink") - if err := os.Symlink(file, symlink); err != nil { - t.Fatal(err) - } - - return file, invalid, symlink, dir -} - -func TestLUtimesNano(t *testing.T) { - file, invalid, symlink, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - before, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - ts := []syscall.Timespec{{0, 0}, {0, 0}} - if err := LUtimesNano(symlink, ts); err != nil { - t.Fatal(err) - } - - symlinkInfo, err := os.Lstat(symlink) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { - t.Fatal("The modification time of the symlink should be different") - } - - fileInfo, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() != fileInfo.ModTime().Unix() { - t.Fatal("The modification time of the file should be same") - } - - if err := LUtimesNano(invalid, ts); err == nil { - t.Fatal("Doesn't return an error on a non-existing file") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index 50c3a043..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!freebsd,!darwin - -package system - -import "syscall" - -// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. 
-func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 index d2e2c057..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,63 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It returns a nil slice and a nil error if the xattr is not set. -func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { - return nil, nil - } - if errno == syscall.ERANGE { - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - } - if errno != 0 { - return nil, errno - } - - return dest[:sz], nil -} - -var _zero uintptr - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 0114f222..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package system - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux.
-func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go deleted file mode 100644 index 1e8c881f..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package ulimit - -import "testing" - -func TestParseValid(t *testing.T) { - u1 := &Ulimit{"nofile", 1024, 512} - if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 { - t.Fatalf("expected %q, but got %q", u1, u2) - } -} - -func TestParseInvalidLimitType(t *testing.T) { - if _, err := Parse("notarealtype=1024:1024"); err == nil { - t.Fatalf("expected error on invalid ulimit type") - } -} - -func TestParseBadFormat(t *testing.T) { - if _, err := Parse("nofile:1024:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile="); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:"); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } -} - -func TestParseHardLessThanSoft(t *testing.T) { - if _, err := Parse("nofile:1024:1"); err == nil { - t.Fatal("expected error on hard limit less than soft limit") - } -} - -func TestParseInvalidValueType(t *testing.T) { - if _, err := Parse("nofile:asdf"); err == nil { - t.Fatal("expected error on bad value type") - } -} - -func TestStringOutput(t *testing.T) { - u := &Ulimit{"nofile", 1024, 512} - if s := u.String(); s != "nofile=512:1024" { - t.Fatal("expected String to return nofile=512:1024, but got", s) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go deleted file mode 100644 index c219a8a9..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go deleted file mode 100644 index fcfb6b7b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package units - -import ( - "testing" - "time" -) - -func TestHumanDuration(t *testing.T) { - // Useful duration abstractions - day := 24 * time.Hour - week := 7 * day - month := 30 * day - year := 365 * day - - assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) - assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) - assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) - assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) - assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) - assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) - assertEquals(t, "2 days", HumanDuration(2*day)) - assertEquals(t, "7 days", HumanDuration(7*day)) - assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) - assertEquals(t, "2 weeks", HumanDuration(2*week)) - assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) - assertEquals(t, "3 weeks", HumanDuration(3*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) - assertEquals(t, "4 weeks", HumanDuration(1*month)) - assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) - assertEquals(t, "8 weeks", HumanDuration(2*month)) - assertEquals(t, "3 months", HumanDuration(3*month+1*week)) - assertEquals(t, "5 months", HumanDuration(5*month+2*week)) - assertEquals(t, "13 months", HumanDuration(13*month)) - assertEquals(t, "23 months", HumanDuration(23*month)) - assertEquals(t, "24 months", HumanDuration(24*month)) - assertEquals(t, "2 years", HumanDuration(24*month+2*week)) - assertEquals(t, "3 years", HumanDuration(3*year+2*month)) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go deleted file mode 100644 index 2fde3b41..00000000 --- 
a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go +++ /dev/null @@ -1,95 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - i := 0 - for size >= base { - size = size / base - i++ - } - return fmt.Sprintf(format, size, _map[i]) -} - -// HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB"). -func HumanSize(size float64) string { - return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. 
-func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseInt(matches[1], 10, 0) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[2]) - if mul, ok := uMap[unitPrefix]; ok { - size *= mul - } - - return size, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go deleted file mode 100644 index 67c3b81e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "reflect" - "runtime" - "strings" - "testing" -) - -func TestBytesSize(t *testing.T) { - assertEquals(t, "1 KiB", BytesSize(1024)) - assertEquals(t, "1 MiB", BytesSize(1024*1024)) - assertEquals(t, "1 MiB", BytesSize(1048576)) - assertEquals(t, "2 MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) -} - -func TestHumanSize(t *testing.T) { - assertEquals(t, "1 kB", HumanSize(1000)) - assertEquals(t, "1.024 kB", HumanSize(1024)) - assertEquals(t, "1 MB", HumanSize(1000000)) - assertEquals(t, "1.049 MB", HumanSize(1048576)) - assertEquals(t, "2 MB", HumanSize(2*MB)) - assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) -} - -func TestFromHumanSize(t *testing.T) { - assertSuccessEquals(t, 32, FromHumanSize, "32") - assertSuccessEquals(t, 32, FromHumanSize, "32b") - assertSuccessEquals(t, 32, FromHumanSize, "32B") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") - assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") - assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") - assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") - assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") - - assertError(t, FromHumanSize, "") - assertError(t, FromHumanSize, "hello") - assertError(t, FromHumanSize, "-32") - assertError(t, FromHumanSize, "32.3") - assertError(t, FromHumanSize, " 32 ") - assertError(t, FromHumanSize, "32.3Kb") - assertError(t, FromHumanSize, "32 mb") - assertError(t, FromHumanSize, "32m b") - assertError(t, FromHumanSize, "32bm") -} - -func TestRAMInBytes(t *testing.T) { - assertSuccessEquals(t, 32, RAMInBytes, "32") - assertSuccessEquals(t, 32, RAMInBytes, "32b") - assertSuccessEquals(t, 32, RAMInBytes, "32B") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") - assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") - assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") - assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") - - assertError(t, RAMInBytes, "") - assertError(t, RAMInBytes, "hello") - assertError(t, RAMInBytes, "-32") - assertError(t, 
RAMInBytes, "32.3") - assertError(t, RAMInBytes, " 32 ") - assertError(t, RAMInBytes, "32.3Kb") - assertError(t, RAMInBytes, "32 mb") - assertError(t, RAMInBytes, "32m b") - assertError(t, RAMInBytes, "32bm") -} - -func assertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - t.Errorf("Expected '%v' but got '%v'", expected, actual) - } -} - -// func that maps to the parse function signatures as testing abstraction -type parseFn func(string) (int64, error) - -// Define 'String()' for pretty-print -func (fn parseFn) String() string { - fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() - return fnName[strings.LastIndex(fnName, ".")+1:] -} - -func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { - res, err := fn(arg) - if err != nil || res != expected { - t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) - } -} - -func assertError(t *testing.T, fn parseFn, arg string) { - res, err := fn(arg) - if err == nil && res != -1 { - t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
"Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e5313..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. 
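The deleted README above describes why sharing http.DefaultClient (and its http.DefaultTransport) across libraries invites data races and cross-library interference. A minimal Go sketch of the pattern follows, inlining the two constructors from the removed cleanhttp.go below so it stands alone; the main function and the 5-second timeout are illustrative assumptions, not part of the vendored package.

package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// defaultTransport mirrors http.DefaultTransport's settings while
// returning a fresh value that shares no state with other callers.
func defaultTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
	}
}

// defaultClient returns a client with stdlib defaults but a private Transport.
func defaultClient() *http.Client {
	return &http.Client{Transport: defaultTransport()}
}

func main() {
	c := defaultClient()
	// Mutating this client is safe: nothing else holds a reference to it,
	// unlike http.DefaultClient, which any imported library may also modify.
	c.Timeout = 5 * time.Second
	fmt.Println(c.Timeout, http.DefaultClient.Timeout) // prints "5s 0s"
}

The design point the README makes is preserved here: each call builds a new Transport, so tweaking one client's timeouts or proxy settings cannot leak into the global defaults that other goroutines and dependencies rely on.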
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go deleted file mode 100644 index 1676d79c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ /dev/null @@ -1,28 +0,0 @@ -package cleanhttp - -import ( - "net" - "net/http" - "time" -) - -// DefaultTransport returns a new http.Transport with the same default values -// as http.DefaultTransport -func DefaultTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} - -// DefaultClient returns a new http.Client with the same default values as -// http.Client, but with a non-shared Transport -func DefaultClient() *http.Client { - return &http.Client{ - Transport: DefaultTransport(), - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe2006..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go deleted file mode 100644 index 6f8a982f..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go +++ /dev/null @@ -1,108 +0,0 @@ -package user - -import ( - "errors" - "fmt" - "syscall" -) - -var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") -) - -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, fmt.Errorf("no matching entries in passwd file") - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. 
-func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, fmt.Errorf("no matching entries in group file") - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go deleted file mode 100644 index 758b734c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" -) - -// Unix-specific path to the passwd and group formatted files. 
-const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 72179488..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go deleted file mode 100644 index e6375ea4..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go +++ /dev/null @@ -1,418 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility -) - -var ( - ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - } - - parts := strings.Split(line, ":") - for i, p := range parts { - if len(v) <= i { - // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files - break - } - - switch e := v[i].(type) { - case *string: - // "root", "adm", "/bin/bash" - *e = p - case *int: - // "0", "4", "1000" - // ignore string to int conversion errors, for great "tolerance" of naughty configuration files - *e, _ = strconv.Atoi(p) - case *[]string: - // "", "root", "root,adm,daemon" - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // panic, because this is a programming/logic error, not a runtime one - panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine( - text, - &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine( - text, - &p.Name, &p.Pass, &p.Gid, &p.List, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -type ExecUser struct { - Uid, Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() - } - - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. 
-// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - var ( - userArg, groupArg string - name string - ) - - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // allow for userArg to have either "user" syntax, or optionally "user:group" syntax - parseLine(userSpec, &userArg, &groupArg) - - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - return u.Uid == user.Uid - } - return u.Name == userArg || strconv.Itoa(u.Uid) == userArg - }) - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) - } - - haveUser := users != nil && len(users) > 0 - if haveUser { - // if we found any user entries that matched our filter, let's take the first one as "correct" - name = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - user.Uid, err = strconv.Atoi(userArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find user %v", userArg) - } - - // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { - return nil, ErrRange - } - - // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit - } - - if groupArg != "" || name != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // Explicit group format takes precedence. - if groupArg != "" { - return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg - } - - // Check if user is a member. - for _, u := range g.List { - if u == name { - return true - } - } - - return false - }) - if err != nil && group != nil { - return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) - } - - haveGroup := groups != nil && len(groups) > 0 - if groupArg != "" { - if haveGroup { - // if we found any group entries that matched our filter, let's take the first one as "correct" - user.Gid = groups[0].Gid - } else { - // we asked for a group but didn't find id... let's check to see if we wanted a numeric group - user.Gid, err = strconv.Atoi(groupArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find group %v", groupArg) - } - - // Ensure gid is inside gid range. - if user.Gid < minId || user.Gid > maxId { - return nil, ErrRange - } - - // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit - } - } else if haveGroup { - // If implicit group format, fill supplementary gids. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. 
If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - var groups = []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.Atoi(ag) - if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) - } - // Ensure gid is inside gid range. - if gid < minId || gid > maxId { - return nil, ErrRange - } - gidMap[gid] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - group, err := os.Open(groupPath) - if err == nil { - defer group.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go deleted file mode 100644 index 53b2289b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go +++ /dev/null @@ -1,472 +0,0 @@ -package user - -import ( - "io" - "reflect" - "sort" - "strconv" - "strings" - "testing" -) - -func TestUserParseLine(t *testing.T) { - var ( - a, b string - c []string - d int - ) - - parseLine("", &a, &b) - if a != "" || b != "" { - t.Fatalf("a and b should be empty ('%v', '%v')", a, b) - } - - parseLine("a", &a, &b) - if a != "a" || b != "" { - t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) - } - - parseLine("bad boys:corny cows", &a, &b) - if a != "bad boys" || b != "corny cows" { - t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) - } - - parseLine("", &c) - if len(c) != 0 { - t.Fatalf("c should be empty (%#v)", c) - } - - parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) - if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { - t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("::::::::::", &a, &b, &c) - if a != "" || b != "" || len(c) != 0 { - t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("not a number", &d) - if d != 0 { - t.Fatalf("d should be 0 (%v)", d) - } - - parseLine("b:12:c", &a, &d, &b) - if a != "b" || b != "c" || d != 12 { - t.Fatalf("a should be 'b' and b should be 'c', and d 
should be 12 ('%v', '%v', %v)", a, b, d) - } -} - -func TestUserParsePasswd(t *testing.T) { - users, err := ParsePasswdFilter(strings.NewReader(` -root:x:0:0:root:/root:/bin/bash -adm:x:3:4:adm:/var/adm:/bin/false -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(users) != 3 { - t.Fatalf("Expected 3 users, got %v", len(users)) - } - if users[0].Uid != 0 || users[0].Name != "root" { - t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) - } - if users[1].Uid != 3 || users[1].Name != "adm" { - t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) - } -} - -func TestUserParseGroup(t *testing.T) { - groups, err := ParseGroupFilter(strings.NewReader(` -root:x:0:root -adm:x:4:root,adm,daemon -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(groups) != 3 { - t.Fatalf("Expected 3 groups, got %v", len(groups)) - } - if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { - t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) - } - if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { - t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) - } -} - -func TestValidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - expected ExecUser - }{ - { - ref: "root", - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{0, 1234}, - Home: "/root", - }, - }, - { - ref: "adm", - expected: ExecUser{ - Uid: 42, - Gid: 43, - Sgids: []int{1234}, - Home: "/var/adm", - }, - }, - { - ref: "root:adm", - expected: ExecUser{ - Uid: 0, - Gid: 43, - Sgids: defaultExecUser.Sgids, - Home: "/root", - }, - }, - { - ref: "adm:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "42:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "1337:1234", - expected: ExecUser{ - Uid: 1337, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "1337", - expected: ExecUser{ - Uid: 1337, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "", - expected: ExecUser{ - Uid: defaultExecUser.Uid, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestInvalidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root 
user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - tests := []string{ - // No such user/group. - "notuser", - "notuser:notgroup", - "root:notgroup", - "notuser:adm", - "8888:notgroup", - "notuser:8888", - - // Invalid user/group values. - "-1:0", - "0:-3", - "-5:-2", - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test, nil, passwd, group) - if err == nil { - t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) - t.Fail() - continue - } - } -} - -func TestGetExecUserNilSources(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - passwd, group bool - expected ExecUser - }{ - { - ref: "", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "root", - passwd: true, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/root", - }, - }, - { - ref: "0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "0:0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - } - - for _, test := range tests { - var passwd, group io.Reader - - if test.passwd { - passwd = strings.NewReader(passwdContent) - } - - if test.group { - group = strings.NewReader(groupContent) - } - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestGetAdditionalGroups(t *testing.T) { - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -adm:x:4343:root,adm-duplicate -this is just some garbage data -` - tests := []struct { - groups []string - expected []int - hasError bool - }{ - { - // empty group - groups: []string{}, - expected: []int{}, - }, - { - // single group - groups: []string{"adm"}, - expected: []int{43}, - }, - { - // multiple groups - groups: []string{"adm", "grp"}, - expected: []int{43, 1234}, - }, - { - // invalid group - groups: []string{"adm", "grp", "not-exist"}, - expected: nil, - hasError: true, - }, - { - // group with numeric id - groups: []string{"43"}, - expected: []int{43}, - }, - { - // group with unknown numeric id - groups: []string{"adm", "10001"}, - expected: []int{43, 10001}, - }, - { - // groups specified twice with numeric and name - groups: []string{"adm", "43"}, - expected: []int{43}, - }, - { - // groups with too small id - groups: []string{"-1"}, - expected: nil, - hasError: true, - }, - { - // groups with too large id - groups: []string{strconv.Itoa(1 << 31)}, - expected: nil, - hasError: true, - }, - } - - for _, test := range tests { - group := 
strings.NewReader(groupContent) - - gids, err := GetAdditionalGroups(test.groups, group) - if test.hasError && err == nil { - t.Errorf("Parse(%#v) expects error but has none", test) - continue - } - if !test.hasError && err != nil { - t.Errorf("Parse(%#v) has error %v", test, err) - continue - } - sort.Sort(sort.IntSlice(gids)) - if !reflect.DeepEqual(gids, test.expected) { - t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) - } - } -} - -func TestGetAdditionalGroupsNumeric(t *testing.T) { - tests := []struct { - groups []string - expected []int - hasError bool - }{ - { - // numeric groups only - groups: []string{"1234", "5678"}, - expected: []int{1234, 5678}, - }, - { - // numeric and alphabetic - groups: []string{"1234", "fake"}, - expected: nil, - hasError: true, - }, - } - - for _, test := range tests { - gids, err := GetAdditionalGroups(test.groups, nil) - if test.hasError && err == nil { - t.Errorf("Parse(%#v) expects error but has none", test) - continue - } - if !test.hasError && err != nil { - t.Errorf("Parse(%#v) has error %v", test, err) - continue - } - sort.Sort(sort.IntSlice(gids)) - if !reflect.DeepEqual(gids, test.expected) { - t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go deleted file mode 100644 index fd13bc23..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/image.go +++ /dev/null @@ -1,597 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "time" -) - -// APIImages represent an image returned in the ListImages call. 
-type APIImages struct { - ID string `json:"Id" yaml:"Id"` - RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` - ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty"` - RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Image is the type representing a docker image and its various properties -type Image struct { - ID string `json:"Id" yaml:"Id"` - Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty"` - Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty"` - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty"` - ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"` - DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"` - Author string `json:"Author,omitempty" yaml:"Author,omitempty"` - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` - Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` - RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"` -} - -// ImagePre012 serves the same purpose as the Image type except that it is for -// earlier versions of the Docker API (pre-012 to be specific) -type ImagePre012 struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` -} - -var ( - // ErrNoSuchImage is the error returned when the image does not exist. - ErrNoSuchImage = errors.New("no such image") - - // ErrMissingRepo is the error returned when the remote repository is - // missing. - ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'") - - // ErrMissingOutputStream is the error returned when no output stream - // is provided to some calls, like BuildImage. - ErrMissingOutputStream = errors.New("missing output stream") - - // ErrMultipleContexts is the error returned when both a ContextDir and - // InputStream are provided in BuildImageOptions - ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") - - // ErrMustSpecifyNames is the error rreturned when the Names field on - // ExportImagesOptions is nil or empty - ErrMustSpecifyNames = errors.New("must specify at least one name to export") -) - -// ListImagesOptions specify parameters to the ListImages function. -// -// See https://goo.gl/xBe1u3 for more details. -type ListImagesOptions struct { - All bool - Filters map[string][]string - Digests bool - Filter string -} - -// ListImages returns the list of available images in the server. -// -// See https://goo.gl/xBe1u3 for more details. 
-func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { - path := "/images/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var images []APIImages - if err := json.NewDecoder(resp.Body).Decode(&images); err != nil { - return nil, err - } - return images, nil -} - -// ImageHistory represent a layer in an image's history returned by the -// ImageHistory call. -type ImageHistory struct { - ID string `json:"Id" yaml:"Id"` - Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` -} - -// ImageHistory returns the history of the image by its name or ID. -// -// See https://goo.gl/8bnTId for more details. -func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { - resp, err := c.do("GET", "/images/"+name+"/history", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - return nil, err - } - defer resp.Body.Close() - var history []ImageHistory - if err := json.NewDecoder(resp.Body).Decode(&history); err != nil { - return nil, err - } - return history, nil -} - -// RemoveImage removes an image by its name or ID. -// -// See https://goo.gl/V3ZWnK for more details. -func (c *Client) RemoveImage(name string) error { - resp, err := c.do("DELETE", "/images/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return ErrNoSuchImage - } - return err - } - resp.Body.Close() - return nil -} - -// RemoveImageOptions present the set of options available for removing an image -// from a registry. -// -// See https://goo.gl/V3ZWnK for more details. -type RemoveImageOptions struct { - Force bool `qs:"force"` - NoPrune bool `qs:"noprune"` -} - -// RemoveImageExtended removes an image by its name or ID. -// Extra params can be passed, see RemoveImageOptions -// -// See https://goo.gl/V3ZWnK for more details. -func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { - uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) - resp, err := c.do("DELETE", uri, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return ErrNoSuchImage - } - return err - } - resp.Body.Close() - return nil -} - -// InspectImage returns an image by its name or ID. -// -// See https://goo.gl/jHPcg6 for more details. 
-func (c *Client) InspectImage(name string) (*Image, error) { - resp, err := c.do("GET", "/images/"+name+"/json", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - return nil, err - } - defer resp.Body.Close() - - var image Image - - // if the caller elected to skip checking the server's version, assume it's the latest - if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { - if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { - return nil, err - } - } else { - var imagePre012 ImagePre012 - if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil { - return nil, err - } - - image.ID = imagePre012.ID - image.Parent = imagePre012.Parent - image.Comment = imagePre012.Comment - image.Created = imagePre012.Created - image.Container = imagePre012.Container - image.ContainerConfig = imagePre012.ContainerConfig - image.DockerVersion = imagePre012.DockerVersion - image.Author = imagePre012.Author - image.Config = imagePre012.Config - image.Architecture = imagePre012.Architecture - image.Size = imagePre012.Size - } - - return &image, nil -} - -// PushImageOptions represents options to use in the PushImage method. -// -// See https://goo.gl/zPtZaT for more details. -type PushImageOptions struct { - // Name of the image - Name string - - // Tag of the image - Tag string - - // Registry server to push the image - Registry string - - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// PushImage pushes an image to a remote registry, logging progress to w. -// -// An empty instance of AuthConfiguration may be used for unauthenticated -// pushes. -// -// See https://goo.gl/zPtZaT for more details. -func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { - if opts.Name == "" { - return ErrNoSuchImage - } - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - name := opts.Name - opts.Name = "" - path := "/images/" + name + "/push?" + queryString(&opts) - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - rawJSONStream: opts.RawJSONStream, - headers: headers, - stdout: opts.OutputStream, - }) -} - -// PullImageOptions present the set of options available for pulling an image -// from a registry. -// -// See https://goo.gl/iJkZjD for more details. -type PullImageOptions struct { - Repository string `qs:"fromImage"` - Registry string - Tag string - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// PullImage pulls an image from a remote registry, logging progress to -// opts.OutputStream. -// -// See https://goo.gl/iJkZjD for more details. -func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream) -} - -func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error { - path := "/images/create?" + qs - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - rawJSONStream: rawJSONStream, - headers: headers, - in: in, - stdout: w, - }) -} - -// LoadImageOptions represents the options for LoadImage Docker API Call -// -// See https://goo.gl/JyClMX for more details. 
-type LoadImageOptions struct { - InputStream io.Reader -} - -// LoadImage imports a tarball docker image -// -// See https://goo.gl/JyClMX for more details. -func (c *Client) LoadImage(opts LoadImageOptions) error { - return c.stream("POST", "/images/load", streamOptions{ - setRawTerminal: true, - in: opts.InputStream, - }) -} - -// ExportImageOptions represent the options for ExportImage Docker API call. -// -// See https://goo.gl/le7vK8 for more details. -type ExportImageOptions struct { - Name string - OutputStream io.Writer -} - -// ExportImage exports an image (as a tar file) into the stream. -// -// See https://goo.gl/le7vK8 for more details. -func (c *Client) ExportImage(opts ExportImageOptions) error { - return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// ExportImagesOptions represent the options for ExportImages Docker API call -// -// See https://goo.gl/huC7HA for more details. -type ExportImagesOptions struct { - Names []string - OutputStream io.Writer `qs:"-"` -} - -// ExportImages exports one or more images (as a tar file) into the stream -// -// See https://goo.gl/huC7HA for more details. -func (c *Client) ExportImages(opts ExportImagesOptions) error { - if opts.Names == nil || len(opts.Names) == 0 { - return ErrMustSpecifyNames - } - return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// ImportImageOptions present the set of informations available for importing -// an image from a source file or the stdin. -// -// See https://goo.gl/iJkZjD for more details. -type ImportImageOptions struct { - Repository string `qs:"repo"` - Source string `qs:"fromSrc"` - Tag string `qs:"tag"` - - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// ImportImage imports an image from a url, a file or stdin -// -// See https://goo.gl/iJkZjD for more details. -func (c *Client) ImportImage(opts ImportImageOptions) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - if opts.Source != "-" { - opts.InputStream = nil - } - if opts.Source != "-" && !isURL(opts.Source) { - f, err := os.Open(opts.Source) - if err != nil { - return err - } - opts.InputStream = f - opts.Source = "-" - } - return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream) -} - -// BuildImageOptions present the set of informations available for building an -// image from a tarfile with a Dockerfile in it. -// -// For more details about the Docker building process, see -// http://goo.gl/tlPXPu. -type BuildImageOptions struct { - Name string `qs:"t"` - Dockerfile string `qs:"dockerfile"` - NoCache bool `qs:"nocache"` - SuppressOutput bool `qs:"q"` - Pull bool `qs:"pull"` - RmTmpContainer bool `qs:"rm"` - ForceRmTmpContainer bool `qs:"forcerm"` - Memory int64 `qs:"memory"` - Memswap int64 `qs:"memswap"` - CPUShares int64 `qs:"cpushares"` - CPUSetCPUs string `qs:"cpusetcpus"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` - Remote string `qs:"remote"` - Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header - AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header - ContextDir string `qs:"-"` - Ulimits []ULimit `qs:"-"` -} - -// BuildImage builds an image from a tarball's url or a Dockerfile in the input -// stream. 
-// -// See https://goo.gl/xySxCe for more details. -func (c *Client) BuildImage(opts BuildImageOptions) error { - if opts.OutputStream == nil { - return ErrMissingOutputStream - } - headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs)) - if err != nil { - return err - } - - if opts.Remote != "" && opts.Name == "" { - opts.Name = opts.Remote - } - if opts.InputStream != nil || opts.ContextDir != "" { - headers["Content-Type"] = "application/tar" - } else if opts.Remote == "" { - return ErrMissingRepo - } - if opts.ContextDir != "" { - if opts.InputStream != nil { - return ErrMultipleContexts - } - var err error - if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil { - return err - } - } - - qs := queryString(&opts) - if len(opts.Ulimits) > 0 { - if b, err := json.Marshal(opts.Ulimits); err == nil { - item := url.Values(map[string][]string{}) - item.Add("ulimits", string(b)) - qs = fmt.Sprintf("%s&%s", qs, item.Encode()) - } - } - - return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{ - setRawTerminal: true, - rawJSONStream: opts.RawJSONStream, - headers: headers, - in: opts.InputStream, - stdout: opts.OutputStream, - }) -} - -func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { - if c.serverAPIVersion == nil { - c.checkAPIVersion() - } - if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { - return AuthConfigurations119(authConfigs.Configs) - } - return authConfigs -} - -// TagImageOptions present the set of options to tag an image. -// -// See https://goo.gl/98ZzkU for more details. -type TagImageOptions struct { - Repo string - Tag string - Force bool -} - -// TagImage adds a tag to the image identified by the given name. -// -// See https://goo.gl/98ZzkU for more details. -func (c *Client) TagImage(name string, opts TagImageOptions) error { - if name == "" { - return ErrNoSuchImage - } - resp, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s", - queryString(&opts)), doOptions{}) - - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - return ErrNoSuchImage - } - - return err -} - -func isURL(u string) bool { - p, err := url.Parse(u) - if err != nil { - return false - } - return p.Scheme == "http" || p.Scheme == "https" -} - -func headersWithAuth(auths ...interface{}) (map[string]string, error) { - var headers = make(map[string]string) - - for _, auth := range auths { - switch auth.(type) { - case AuthConfiguration: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - case AuthConfigurations, AuthConfigurations119: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - } - } - - return headers, nil -} - -// APIImageSearch reflect the result of a search on the Docker Hub. -// -// See https://goo.gl/AYjyrF for more details. 
-type APIImageSearch struct { - Description string `json:"description,omitempty" yaml:"description,omitempty"` - IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"` - IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` - StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty"` -} - -// SearchImages search the docker hub with a specific given term. -// -// See https://goo.gl/AYjyrF for more details. -func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { - resp, err := c.do("GET", "/images/search?term="+term, doOptions{}) - defer resp.Body.Close() - if err != nil { - return nil, err - } - var searchResult []APIImageSearch - if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { - return nil, err - } - return searchResult, nil -} - -// SearchImagesEx search the docker hub with a specific given term and authentication. -// -// See https://goo.gl/AYjyrF for more details. -func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) { - headers, err := headersWithAuth(auth) - if err != nil { - return nil, err - } - - resp, err := c.do("GET", "/images/search?term="+term, doOptions{ - headers: headers, - }) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - var searchResult []APIImageSearch - if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { - return nil, err - } - - return searchResult, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/image_test.go b/vendor/github.com/fsouza/go-dockerclient/image_test.go deleted file mode 100644 index bd500751..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/image_test.go +++ /dev/null @@ -1,1014 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "reflect" - "strings" - "testing" - "time" -) - -func newTestClient(rt *FakeRoundTripper) Client { - endpoint := "http://localhost:4243" - u, _ := parseEndpoint("http://localhost:4243", false) - testAPIVersion, _ := NewAPIVersion("1.17") - client := Client{ - HTTPClient: &http.Client{Transport: rt}, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - serverAPIVersion: testAPIVersion, - } - return client -} - -type stdoutMock struct { - *bytes.Buffer -} - -func (m stdoutMock) Close() error { - return nil -} - -type stdinMock struct { - *bytes.Buffer -} - -func (m stdinMock) Close() error { - return nil -} - -func TestListImages(t *testing.T) { - body := `[ - { - "Repository":"base", - "Tag":"ubuntu-12.10", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "Repository":"base", - "Tag":"ubuntu-quantal", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "RepoTag": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTag": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2e", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } -]` - var expected []APIImages - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - images, err := client.ListImages(ListImagesOptions{}) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(images, expected) { - t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images) - } -} - -func TestListImagesParameters(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK} - client := newTestClient(fakeRT) - _, err := client.ListImages(ListImagesOptions{All: false}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ListImages({All: false}: Wrong HTTP method. Want GET. Got %s.", req.Method) - } - if all := req.URL.Query().Get("all"); all != "0" && all != "" { - t.Errorf("ListImages({All: false}): Wrong parameter. Want all=0 or not present at all. Got all=%s", all) - } - fakeRT.Reset() - _, err = client.ListImages(ListImagesOptions{All: true}) - if err != nil { - t.Fatal(err) - } - req = fakeRT.requests[0] - if all := req.URL.Query().Get("all"); all != "1" { - t.Errorf("ListImages({All: true}): Wrong parameter. Want all=1. Got all=%s", all) - } - fakeRT.Reset() - _, err = client.ListImages(ListImagesOptions{Filters: map[string][]string{ - "dangling": {"true"}, - }}) - if err != nil { - t.Fatal(err) - } - req = fakeRT.requests[0] - body := req.URL.Query().Get("filters") - var filters map[string][]string - err = json.Unmarshal([]byte(body), &filters) - if err != nil { - t.Fatal(err) - } - if len(filters["dangling"]) != 1 || filters["dangling"][0] != "true" { - t.Errorf("ListImages(dangling=[true]): Wrong filter map. 
Want dangling=[true], got dangling=%v", filters["dangling"]) - } -} - -func TestImageHistory(t *testing.T) { - body := `[ - { - "Id": "25daec02219d2d852f7526137213a9b199926b4b24e732eab5b8bc6c49bd470e", - "Tags": [ - "debian:7.6", - "debian:latest", - "debian:7", - "debian:wheezy" - ], - "Created": 1409856216, - "CreatedBy": "/bin/sh -c #(nop) CMD [/bin/bash]" - }, - { - "Id": "41026a5347fb5be6ed16115bf22df8569697139f246186de9ae8d4f67c335dce", - "Created": 1409856213, - "CreatedBy": "/bin/sh -c #(nop) ADD file:1ee9e97209d00e3416a4543b23574cc7259684741a46bbcbc755909b8a053a38 in /", - "Size": 85178663 - }, - { - "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", - "Tags": [ - "scratch:latest" - ], - "Created": 1371157430 - } -]` - var expected []ImageHistory - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - history, err := client.ImageHistory("debian:latest") - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(history, expected) { - t.Errorf("ImageHistory: Wrong return value. Want %#v. Got %#v.", expected, history) - } -} - -func TestRemoveImage(t *testing.T) { - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveImage(name) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveImageNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) - err := client.RemoveImage("test:") - if err != ErrNoSuchImage { - t.Errorf("RemoveImage: wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestRemoveImageExtended(t *testing.T) { - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveImageExtended(name, RemoveImageOptions{Force: true, NoPrune: true}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } - expectedQuery := "force=1&noprune=1" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PushImage: Wrong query string. Want %q. 
Got %q.", expectedQuery, query) - } -} - -func TestInspectImage(t *testing.T) { - body := `{ - "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Parent":"27cf784147099545", - "Created":"2013-03-23T22:24:18.818426Z", - "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "ContainerConfig":{"Memory":1}, - "VirtualSize":12345 -}` - - created, err := time.Parse(time.RFC3339Nano, "2013-03-23T22:24:18.818426Z") - if err != nil { - t.Fatal(err) - } - - expected := Image{ - ID: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - Parent: "27cf784147099545", - Created: created, - Container: "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - ContainerConfig: Config{ - Memory: 1, - }, - VirtualSize: 12345, - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - image, err := client.InspectImage(expected.ID) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*image, expected) { - t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json")) - if req.URL.Path != u.Path { - t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path) - } -} - -func TestInspectImageNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) - name := "test" - image, err := client.InspectImage(name) - if image != nil { - t.Errorf("InspectImage(%q): expected image, got %#v.", name, image) - } - if err != ErrNoSuchImage { - t.Errorf("InspectImage(%q): wrong error. Want %#v. Got %#v.", name, ErrNoSuchImage, err) - } -} - -func TestPushImage(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pushing 1/100" - if buf.String() != expected { - t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/test/push")) - if req.URL.Path != u.Path { - t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - if query := req.URL.Query().Encode(); query != "" { - t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query) - } - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) - } - if strings.TrimSpace(string(auth)) != "{}" { - t.Errorf("PushImage: wrong body. Want %q. Got %q.", - base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth")) - } -} - -func TestPushImageWithRawJSON(t *testing.T) { - body := ` - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? 
(n/a)", "progressDetail":{"current":1}}} - {"status":"Image successfully pushed"} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - - err := client.PushImage(PushImageOptions{ - Name: "test", - OutputStream: &buf, - RawJSONStream: true, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("PushImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) - } -} - -func TestPushImageWithAuthentication(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - inputAuth := AuthConfiguration{ - Username: "gopher", - Password: "gopher123", - Email: "gopher@tsuru.io", - } - err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - var gotAuth AuthConfiguration - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) - } - - err = json.Unmarshal(auth, &gotAuth) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotAuth, inputAuth) { - t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth) - } -} - -func TestPushImageCustomRegistry(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var authConfig AuthConfiguration - var buf bytes.Buffer - opts := PushImageOptions{ - Name: "test", Registry: "docker.tsuru.io", - OutputStream: &buf, - } - err := client.PushImage(opts, authConfig) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedQuery := "registry=docker.tsuru.io" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query) - } -} - -func TestPushImageNoName(t *testing.T) { - client := Client{} - err := client.PushImage(PushImageOptions{}, AuthConfiguration{}) - if err != ErrNoSuchImage { - t.Errorf("PushImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestPullImage(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf}, - AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pulling 1/100" - if buf.String() != expected { - t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/create")) - if req.URL.Path != u.Path { - t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQuery := "fromImage=base" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PullImage: Wrong query strin. Want %q. 
Got %q.", expectedQuery, query) - } -} - -func TestPullImageWithRawJSON(t *testing.T) { - body := ` - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{ - Repository: "base", - OutputStream: &buf, - RawJSONStream: true, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("PullImage: Wrong raw output. Want %q. Got %q", body, buf.String()) - } -} - -func TestPullImageWithoutOutputStream(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageCustomRegistry(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - OutputStream: &buf, - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageTag(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - Tag: "latest", - OutputStream: &buf, - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageNoRepository(t *testing.T) { - var opts PullImageOptions - client := Client{} - err := client.PullImage(opts, AuthConfiguration{}) - if err != ErrNoSuchImage { - t.Errorf("PullImage: got wrong error. Want %#v. 
Got %#v.", ErrNoSuchImage, err) - } -} - -func TestImportImageFromUrl(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := ImportImageOptions{ - Source: "http://mycompany.com/file.tar", - Repository: "testimage", - Tag: "tag", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestImportImageFromInput(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - in := bytes.NewBufferString("tar content") - var buf bytes.Buffer - opts := ImportImageOptions{ - Source: "-", Repository: "testimage", - InputStream: in, OutputStream: &buf, - Tag: "tag", - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) - } - e := "tar content" - if string(body) != e { - t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body)) - } -} - -func TestImportImageDoesNotPassesInputIfSourceIsNotDash(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - in := bytes.NewBufferString("foo") - opts := ImportImageOptions{ - Source: "http://test.com/container.tar", Repository: "testimage", - InputStream: in, OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) - } - if string(body) != "" { - t.Errorf("ImportImage: wrong body. Want nothing. Got %#v.", string(body)) - } -} - -func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - tarPath := "testing/data/container.tar" - opts := ImportImageOptions{ - Source: tarPath, Repository: "testimage", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - tar, err := os.Open(tarPath) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - tarContent, err := ioutil.ReadAll(tar) - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tarContent, body) { - t.Errorf("ImportImage: wrong body. Want %#v content. 
Got %#v.", tarPath, body) - } -} - -func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - tarPath := "testing/data/container.tar" - opts := ImportImageOptions{ - Source: tarPath, Repository: "testimage", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestBuildImageParameters(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - Pull: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - Memory: 1024, - Memswap: 2048, - CPUShares: 10, - CPUSetCPUs: "0-3", - Ulimits: []ULimit{{Name: "nofile", Soft: 100, Hard: 200}}, - InputStream: &buf, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil && strings.Index(err.Error(), "build image fail") == -1 { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{ - "t": {opts.Name}, - "nocache": {"1"}, - "q": {"1"}, - "pull": {"1"}, - "rm": {"1"}, - "forcerm": {"1"}, - "memory": {"1024"}, - "memswap": {"2048"}, - "cpushares": {"10"}, - "cpusetcpus": {"0-3"}, - "ulimits": {"[{\"Name\":\"nofile\",\"Soft\":100,\"Hard\":200}]"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestBuildImageParametersForRemoteBuild(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - Remote: "testing/data/container.tar", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestBuildImageMissingRepoAndNilInput(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != ErrMissingRepo { - t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err) - } -} - -func TestBuildImageMissingOutputStream(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := BuildImageOptions{Name: "testImage"} - err := client.BuildImage(opts) - if err != ErrMissingOutputStream { - t.Errorf("BuildImage: wrong error returned. Want %#v. 
Got %#v.", ErrMissingOutputStream, err) - } -} - -func TestBuildImageWithRawJSON(t *testing.T) { - body := ` - {"stream":"Step 0 : FROM ubuntu:latest\n"} - {"stream":" ---\u003e 4300eb9d3c8d\n"} - {"stream":"Step 1 : MAINTAINER docker \n"} - {"stream":" ---\u003e Using cache\n"} - {"stream":" ---\u003e 3a3ed758c370\n"} - {"stream":"Step 2 : CMD /usr/bin/top\n"} - {"stream":" ---\u003e Running in 36b1479cc2e4\n"} - {"stream":" ---\u003e 4b6188aebe39\n"} - {"stream":"Removing intermediate container 36b1479cc2e4\n"} - {"stream":"Successfully built 4b6188aebe39\n"} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - RmTmpContainer: true, - InputStream: &buf, - OutputStream: &buf, - RawJSONStream: true, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("BuildImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) - } -} - -func TestBuildImageRemoteWithoutName(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Remote: "testing/data/container.tar", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestTagImageParameters(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := TagImageOptions{Repo: "testImage"} - err := client.TagImage("base", opts) - if err != nil && strings.Index(err.Error(), "tag image fail") == -1 { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := "http://localhost:4243/images/base/tag?repo=testImage" - got := req.URL.String() - if !reflect.DeepEqual(got, expected) { - t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestTagImageMissingRepo(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := TagImageOptions{Repo: "testImage"} - err := client.TagImage("", opts) - if err != ErrNoSuchImage { - t.Errorf("TestTag: wrong error returned. Want %#v. Got %#v.", - ErrNoSuchImage, err) - } -} - -func TestIsUrl(t *testing.T) { - url := "http://foo.bar/" - result := isURL(url) - if !result { - t.Errorf("isURL: wrong match. Expected %#v to be a url. Got %#v.", url, result) - } - url = "/foo/bar.tar" - result = isURL(url) - if result { - t.Errorf("isURL: wrong match. Expected %#v to not be a url. Got %#v", url, result) - } -} - -func TestLoadImage(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - tar, err := os.Open("testing/data/container.tar") - if err != nil { - t.Fatal(err) - } else { - defer tar.Close() - } - opts := LoadImageOptions{InputStream: tar} - err = client.LoadImage(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("LoadImage: wrong method. Expected %q. 
Got %q.", "POST", req.Method) - } - if req.URL.Path != "/images/load" { - t.Errorf("LoadImage: wrong URL. Expected %q. Got %q.", "/images/load", req.URL.Path) - } -} - -func TestExportImage(t *testing.T) { - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImageOptions{Name: "testimage", OutputStream: &buf} - err := client.ExportImage(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) - } - expectedPath := "/images/testimage/get" - if req.URL.Path != expectedPath { - t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expectedPath, req.URL.Path) - } -} - -func TestExportImages(t *testing.T) { - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImagesOptions{Names: []string{"testimage1", "testimage2:latest"}, OutputStream: &buf} - err := client.ExportImages(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) - } - expected := "http://localhost:4243/images/get?names=testimage1&names=testimage2%3Alatest" - got := req.URL.String() - if !reflect.DeepEqual(got, expected) { - t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expected, got) - } -} - -func TestExportImagesNoNames(t *testing.T) { - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImagesOptions{Names: []string{}, OutputStream: &buf} - err := client.ExportImages(opts) - if err == nil { - t.Error("Expected an error") - } - if err != ErrMustSpecifyNames { - t.Error(err) - } -} - -func TestSearchImages(t *testing.T) { - body := `[ - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - }, - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":false, - "name":"poklet/cassandra", - "star_count":17 - } - , - { - "description":"A container with Cassandra 2.0.3", - "is_official":false, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - } -]` - var expected []APIImageSearch - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - result, err := client.SearchImages("cassandra") - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(result, expected) { - t.Errorf("SearchImages: Wrong return value. Want %#v. 
Got %#v.", expected, result) - } -} - -func TestSearchImagesEx(t *testing.T) { - body := `[ - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - }, - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":false, - "name":"poklet/cassandra", - "star_count":17 - } - , - { - "description":"A container with Cassandra 2.0.3", - "is_official":false, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - } -]` - var expected []APIImageSearch - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - auth := AuthConfiguration{} - result, err := client.SearchImagesEx("cassandra", auth) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(result, expected) { - t.Errorf("SearchImages: Wrong return value. Want %#v. Got %#v.", expected, result) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/integration_test.go b/vendor/github.com/fsouza/go-dockerclient/integration_test.go deleted file mode 100644 index f5aeea27..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/integration_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build docker_integration - -package docker - -import ( - "bytes" - "os" - "testing" -) - -var dockerEndpoint string - -func init() { - dockerEndpoint = os.Getenv("DOCKER_HOST") - if dockerEndpoint == "" { - dockerEndpoint = "unix:///var/run/docker.sock" - } -} - -func TestIntegrationPullCreateStartLogs(t *testing.T) { - imageName := pullImage(t) - client := getClient() - hostConfig := HostConfig{PublishAllPorts: true} - createOpts := CreateContainerOptions{ - Config: &Config{ - Image: imageName, - Cmd: []string{"cat", "/home/gopher/file.txt"}, - User: "gopher", - }, - HostConfig: &hostConfig, - } - container, err := client.CreateContainer(createOpts) - if err != nil { - t.Fatal(err) - } - err = client.StartContainer(container.ID, &hostConfig) - if err != nil { - t.Fatal(err) - } - status, err := client.WaitContainer(container.ID) - if err != nil { - t.Error(err) - } - if status != 0 { - t.Error("WaitContainer(%q): wrong status. Want 0. Got %d", container.ID, status) - } - var stdout, stderr bytes.Buffer - logsOpts := LogsOptions{ - Container: container.ID, - OutputStream: &stdout, - ErrorStream: &stderr, - Stdout: true, - Stderr: true, - } - err = client.Logs(logsOpts) - if err != nil { - t.Error(err) - } - if stderr.String() != "" { - t.Errorf("Got unexpected stderr from logs: %q", stderr.String()) - } - expected := `Welcome to reality, wake up and rejoice -Welcome to reality, you've made the right choice -Welcome to reality, and let them hear your voice, shout it out! 
-` - if stdout.String() != expected { - t.Errorf("Got wrong stdout from logs.\nWant:\n%#v.\n\nGot:\n%#v.", expected, stdout.String()) - } -} - -func pullImage(t *testing.T) string { - imageName := "fsouza/go-dockerclient-integration" - var buf bytes.Buffer - pullOpts := PullImageOptions{ - Repository: imageName, - OutputStream: &buf, - } - client := getClient() - err := client.PullImage(pullOpts, AuthConfiguration{}) - if err != nil { - t.Logf("Pull output: %s", buf.String()) - t.Fatal(err) - } - return imageName -} - -func getClient() *Client { - client, _ := NewClient(dockerEndpoint) - return client -} diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go deleted file mode 100644 index 34c96531..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/misc.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import "strings" - -// Version returns version information about the docker server. -// -// See https://goo.gl/ND9R8L for more details. -func (c *Client) Version() (*Env, error) { - resp, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var env Env - if err := env.Decode(resp.Body); err != nil { - return nil, err - } - return &env, nil -} - -// Info returns system-wide information about the Docker server. - -// -// See https://goo.gl/ElTHi2 for more details. -func (c *Client) Info() (*Env, error) { - resp, err := c.do("GET", "/info", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var info Env - if err := info.Decode(resp.Body); err != nil { - return nil, err - } - return &info, nil -} - -// ParseRepositoryTag gets the name of the repository and returns it split -// into two parts: the repository and the tag. -// -// Some examples: -// -// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest -// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, "" -func ParseRepositoryTag(repoTag string) (repository string, tag string) { - n := strings.LastIndex(repoTag, ":") - if n < 0 { - return repoTag, "" - } - if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { - return repoTag[:n], tag - } - return repoTag, "" -}
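Reviewer note on the vendor drop above: misc.go also removes the exported ParseRepositoryTag helper, which callers elsewhere in this repo may rely on. As a minimal sketch of its behavior for anyone auditing call sites, the inputs below are taken from the removed doc comment; the `docker` import alias is an assumption.

```go
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// The trailing ":tag" is split off unless the text after the last colon
	// contains a "/", which indicates a registry port rather than a tag.
	for _, repoTag := range []string{
		"localhost.localdomain:5000/samalba/hipache:latest",
		"localhost.localdomain:5000/samalba/hipache",
		"tsuru/python:2.7",
	} {
		repo, tag := docker.ParseRepositoryTag(repoTag)
		fmt.Printf("%s -> repo=%q tag=%q\n", repoTag, repo, tag)
	}
}
```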
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc_test.go b/vendor/github.com/fsouza/go-dockerclient/misc_test.go deleted file mode 100644 index ceaf076e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/misc_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "net/http" - "net/url" - "reflect" - "sort" - "testing" -) - -type DockerVersion struct { - Version string - GitCommit string - GoVersion string -} - -func TestVersion(t *testing.T) { - body := `{ - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" -}` - fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(&fakeRT) - expected := DockerVersion{ - Version: "0.2.2", - GitCommit: "5a2a5cc+CHANGES", - GoVersion: "go1.0.3", - } - version, err := client.Version() - if err != nil { - t.Fatal(err) - } - - if result := version.Get("Version"); result != expected.Version { - t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version")) - } - if result := version.Get("GitCommit"); result != expected.GitCommit { - t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit")) - } - if result := version.Get("GoVersion"); result != expected.GoVersion { - t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion")) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/version")) - if req.URL.Path != u.Path { - t.Errorf("Version(): wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } -} - -func TestVersionError(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - version, err := client.Version() - if version != nil { - t.Errorf("Version(): expected <nil> value, got %#v.", version) - } - if err == nil { - t.Error("Version(): unexpected <nil> error") - } -} - -func TestInfo(t *testing.T) { - body := `{ - "Containers":11, - "Images":16, - "Debug":0, - "NFd":11, - "NGoroutines":21, - "MemoryLimit":1, - "SwapLimit":0 -}` - fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(&fakeRT) - expected := Env{} - expected.SetInt("Containers", 11) - expected.SetInt("Images", 16) - expected.SetBool("Debug", false) - expected.SetInt("NFd", 11) - expected.SetInt("NGoroutines", 21) - expected.SetBool("MemoryLimit", true) - expected.SetBool("SwapLimit", false) - info, err := client.Info() - if err != nil { - t.Fatal(err) - } - infoSlice := []string(*info) - expectedSlice := []string(expected) - sort.Strings(infoSlice) - sort.Strings(expectedSlice) - if !reflect.DeepEqual(expectedSlice, infoSlice) { - t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, *info) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/info")) - if req.URL.Path != u.Path { - t.Errorf("Info(): Wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } -} - -func TestInfoError(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - version, err := client.Info() - if version != nil { - t.Errorf("Info(): expected value, got %#v.", version) - } - if err == nil { - t.Error("Info(): unexpected error") - } -} - -func TestParseRepositoryTag(t *testing.T) { - var tests = []struct { - input string - expectedRepo string - expectedTag string - }{ - { - "localhost.localdomain:5000/samalba/hipache:latest", - "localhost.localdomain:5000/samalba/hipache", - "latest", - }, - { - "localhost.localdomain:5000/samalba/hipache", - "localhost.localdomain:5000/samalba/hipache", - "", - }, - { - "tsuru/python", - "tsuru/python", - "", - }, - { - "tsuru/python:2.7", - "tsuru/python", - "2.7", - }, - } - for _, tt := range tests { - repo, tag := ParseRepositoryTag(tt.input) - if repo != tt.expectedRepo { - t.Errorf("ParseRepositoryTag(%q): wrong repository. Want %q. Got %q", tt.input, tt.expectedRepo, repo) - } - if tag != tt.expectedTag { - t.Errorf("ParseRepositoryTag(%q): wrong tag. Want %q. Got %q", tt.input, tt.expectedTag, tag) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go deleted file mode 100644 index a7fc152d..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/network.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" -) - -// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the -// network already exists. -var ErrNetworkAlreadyExists = errors.New("network already exists") - -// Network represents a network. -// -// See https://goo.gl/1kmPKZ for more details. -type Network struct { - Name string - ID string `json:"Id"` - Scope string - Driver string - Containers map[string]Endpoint - Options map[string]string -} - -// Endpoint contains network resources allocated and used for a container in a network -// -// See https://goo.gl/1kmPKZ for more details. -type Endpoint struct { - Name string - ID string `json:"EndpointID"` - MacAddress string - IPv4Address string - IPv6Address string -} - -// ListNetworks returns all networks. -// -// See https://goo.gl/1kmPKZ for more details. -func (c *Client) ListNetworks() ([]Network, error) { - resp, err := c.do("GET", "/networks", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var networks []Network - if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { - return nil, err - } - return networks, nil -} - -// NetworkInfo returns information about a network by its ID. -// -// See https://goo.gl/1kmPKZ for more details. 
-func (c *Client) NetworkInfo(id string) (*Network, error) { - path := "/networks/" + id - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchNetwork{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var network Network - if err := json.NewDecoder(resp.Body).Decode(&network); err != nil { - return nil, err - } - return &network, nil -} - -// CreateNetworkOptions specify parameters to the CreateNetwork function and -// (for now) is the expected body of the "create network" http request message -// -// See https://goo.gl/1kmPKZ for more details. -type CreateNetworkOptions struct { - Name string `json:"Name"` - CheckDuplicate bool `json:"CheckDuplicate"` - Driver string `json:"Driver"` - IPAM IPAMOptions `json:"IPAM"` - Options map[string]interface{} `json:"options"` -} - -// IPAMOptions controls IP Address Management when creating a network -// -// See https://goo.gl/T8kRVH for more details. -type IPAMOptions struct { - Driver string `json:"Driver"` - Config []IPAMConfig `json:"IPAMConfig"` -} - -// IPAMConfig represents IPAM configurations -// -// See https://goo.gl/T8kRVH for more details. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// CreateNetwork creates a new network, returning the network instance, -// or an error in case of failure. -// -// See https://goo.gl/1kmPKZ for more details. -func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { - resp, err := c.do( - "POST", - "/networks/create", - doOptions{ - data: opts, - }, - ) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusConflict { - return nil, ErrNetworkAlreadyExists - } - return nil, err - } - defer resp.Body.Close() - - type createNetworkResponse struct { - ID string - } - var ( - network Network - cnr createNetworkResponse - ) - if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil { - return nil, err - } - - network.Name = opts.Name - network.ID = cnr.ID - network.Driver = opts.Driver - - return &network, nil -} - -// RemoveNetwork removes a network, returning an error in case of failure. - -// -// See https://goo.gl/1kmPKZ for more details. -func (c *Client) RemoveNetwork(id string) error { - resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchNetwork{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// NoSuchNetwork is the error returned when a given network does not exist. -type NoSuchNetwork struct { - ID string -} - -func (err *NoSuchNetwork) Error() string { - return fmt.Sprintf("No such network: %s", err.ID) -}
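Reviewer note: the network.go removal above drops the client's whole network API. A minimal usage sketch under stated assumptions, in case anything in this repo still calls it: the endpoint is the conventional local socket, and the network name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	network, err := client.CreateNetwork(docker.CreateNetworkOptions{
		Name:   "example-net", // hypothetical name
		Driver: "bridge",
	})
	if err == docker.ErrNetworkAlreadyExists {
		// CreateNetwork maps an HTTP 409 from the daemon to this sentinel.
		log.Fatal("network already exists")
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created network", network.ID)
}
```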
diff --git a/vendor/github.com/fsouza/go-dockerclient/network_test.go b/vendor/github.com/fsouza/go-dockerclient/network_test.go deleted file mode 100644 index 9e6be308..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/network_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" -) - -func TestListNetworks(t *testing.T) { - jsonNetworks := `[ - { - "ID": "8dfafdbc3a40", - "Name": "blah", - "Type": "bridge", - "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] - }, - { - "ID": "9fb1e39c", - "Name": "foo", - "Type": "bridge", - "Endpoints":[{"ID": "c080be979dda", "Name": "lllll2222", "Network": "9fb1e39c"}] - } -]` - var expected []Network - err := json.Unmarshal([]byte(jsonNetworks), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonNetworks, status: http.StatusOK}) - networks, err := client.ListNetworks() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(networks, expected) { - t.Errorf("ListNetworks: Expected %#v. Got %#v.", expected, networks) - } -} - -func TestNetworkInfo(t *testing.T) { - jsonNetwork := `{ - "ID": "8dfafdbc3a40", - "Name": "blah", - "Type": "bridge", - "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] - }` - var expected Network - err := json.Unmarshal([]byte(jsonNetwork), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonNetwork, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "8dfafdbc3a40" - network, err := client.NetworkInfo(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*network, expected) { - t.Errorf("NetworkInfo(%q): Expected %#v. Got %#v.", id, expected, network) - } - expectedURL, _ := url.Parse(client.getURL("/networks/8dfafdbc3a40")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("NetworkInfo(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestNetworkCreate(t *testing.T) { - jsonID := `{"ID": "8dfafdbc3a40"}` - jsonNetwork := `{ - "ID": "8dfafdbc3a40", - "Name": "foobar", - "Driver": "bridge" - }` - var expected Network - err := json.Unmarshal([]byte(jsonNetwork), &expected) - if err != nil { - t.Fatal(err) - } - - client := newTestClient(&FakeRoundTripper{message: jsonID, status: http.StatusOK}) - opts := CreateNetworkOptions{"foobar", false, "bridge", IPAMOptions{}, nil} - network, err := client.CreateNetwork(opts) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(*network, expected) { - t.Errorf("CreateNetwork: Expected %#v. Got %#v.", expected, network) - } -} - -func TestNetworkRemove(t *testing.T) { - id := "8dfafdbc3a40" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveNetwork(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/networks/" + id)) - if req.URL.Path != u.Path { - t.Errorf("RemoveNetwork(%q): Wrong request path. Want %q. Got %q.", id, u.Path, req.URL.Path) - } -}
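Reviewer note: every unit test removed in this diff relies on the package's unexported FakeRoundTripper, which swaps the client's HTTP transport for a stub so no daemon is needed. For reference, here is a self-contained sketch of that technique; `stubTripper` is a hypothetical stand-in, not the real type.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// stubTripper answers every request with a canned body and status,
// mimicking what FakeRoundTripper did for the removed tests.
type stubTripper struct {
	body   string
	status int
}

func (s *stubTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: s.status,
		Body:       ioutil.NopCloser(strings.NewReader(s.body)),
		Request:    r,
	}, nil
}

func main() {
	client := &http.Client{Transport: &stubTripper{body: "[]", status: http.StatusOK}}
	resp, err := client.Get("http://localhost:4243/networks")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(b)) // 200 []
}
```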
diff --git a/vendor/github.com/fsouza/go-dockerclient/signal.go b/vendor/github.com/fsouza/go-dockerclient/signal.go deleted file mode 100644 index 16aa0038..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/signal.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -// Signal represents a signal that can be sent to the container on -// KillContainer call. -type Signal int - -// These values represent all signals available on Linux, where containers will -// be running. -const ( - SIGABRT = Signal(0x6) - SIGALRM = Signal(0xe) - SIGBUS = Signal(0x7) - SIGCHLD = Signal(0x11) - SIGCLD = Signal(0x11) - SIGCONT = Signal(0x12) - SIGFPE = Signal(0x8) - SIGHUP = Signal(0x1) - SIGILL = Signal(0x4) - SIGINT = Signal(0x2) - SIGIO = Signal(0x1d) - SIGIOT = Signal(0x6) - SIGKILL = Signal(0x9) - SIGPIPE = Signal(0xd) - SIGPOLL = Signal(0x1d) - SIGPROF = Signal(0x1b) - SIGPWR = Signal(0x1e) - SIGQUIT = Signal(0x3) - SIGSEGV = Signal(0xb) - SIGSTKFLT = Signal(0x10) - SIGSTOP = Signal(0x13) - SIGSYS = Signal(0x1f) - SIGTERM = Signal(0xf) - SIGTRAP = Signal(0x5) - SIGTSTP = Signal(0x14) - SIGTTIN = Signal(0x15) - SIGTTOU = Signal(0x16) - SIGUNUSED = Signal(0x1f) - SIGURG = Signal(0x17) - SIGUSR1 = Signal(0xa) - SIGUSR2 = Signal(0xc) - SIGVTALRM = Signal(0x1a) - SIGWINCH = Signal(0x1c) - SIGXCPU = Signal(0x18) - SIGXFSZ = Signal(0x19) -) diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go deleted file mode 100644 index 48042cbd..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" -) - -func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { - excludes, err := parseDockerignore(srcPath) - if err != nil { - return nil, err - } - - includes := []string{"."} - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The daemon will remove them for us, if needed, after it - // parses the Dockerfile. - // - // https://github.com/docker/docker/issues/8330 - // - forceIncludeFiles := []string{".dockerignore", dockerfilePath} - - for _, includeFile := range forceIncludeFiles { - if includeFile == "" { - continue - } - keepThem, err := fileutils.Matches(includeFile, excludes) - if err != nil { - return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err) - } - if keepThem { - includes = append(includes, includeFile) - } - } - - if err := validateContextDirectory(srcPath, excludes); err != nil { - return nil, err - } - tarOpts := &archive.TarOptions{ - ExcludePatterns: excludes, - IncludeFiles: includes, - Compression: archive.Uncompressed, - NoLchown: true, - } - return archive.TarWithOptions(srcPath, tarOpts) -} - -// validateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read. 
- -// Symlinks which point to non-existing files don't trigger an error -func validateContextDirectory(srcPath string, excludes []string) error { - return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hang on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -func parseDockerignore(root string) ([]string, error) { - var excludes []string - ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err) - } - excludes = strings.Split(string(ignore), "\n") - - return excludes, nil -}
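Reviewer note: createTarStream above is unexported plumbing behind the client's local build-context mode, where the client tars the context itself, honors .dockerignore, and force-includes the Dockerfile and .dockerignore entries. A hedged sketch of the public entry point that exercises it; the image name and context directory are placeholders, and the ContextDir field is assumed to be present in this vendored revision of BuildImageOptions.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	var output bytes.Buffer
	// With ContextDir set, the client builds the tar stream locally,
	// applying the .dockerignore handling shown in the removed tar.go.
	err = client.BuildImage(docker.BuildImageOptions{
		Name:         "example/image",   // hypothetical tag
		ContextDir:   "./build-context", // hypothetical directory
		OutputStream: &output,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(output.String())
}
```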
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go deleted file mode 100644 index 55f43174..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/tls.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// The content is borrowed from Docker's own source code to provide a simple -// tls based dialer - -package docker - -import ( - "crypto/tls" - "errors" - "net" - "strings" - "time" -) - -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if cwc, ok := c.rawConn.(interface { - CloseWrite() error - }); ok { - return cwc.CloseWrite() - } - return nil -} - -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - rawConn, err := dialer.Dial(network, addr) - if err != nil { - return nil, err - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is where Docker differs from the standard crypto/tls package: we return a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go deleted file mode 100644 index 0e57cb12..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/volume.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "net/http" -) - -var ( - // ErrNoSuchVolume is the error returned when the volume does not exist. - ErrNoSuchVolume = errors.New("no such volume") - - // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use. - ErrVolumeInUse = errors.New("volume in use and cannot be removed") -) - -// Volume represents a volume. -// -// See https://goo.gl/FZA4BK for more details. -type Volume struct { - Name string `json:"Name" yaml:"Name"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` - Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"` -} - -// ListVolumesOptions specify parameters to the ListVolumes function. -// -// See https://goo.gl/FZA4BK for more details. -type ListVolumesOptions struct { - Filters map[string][]string -} - -// ListVolumes returns a list of available volumes in the server. -// -// See https://goo.gl/FZA4BK for more details. -func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { - resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - m := make(map[string]interface{}) - if err := json.NewDecoder(resp.Body).Decode(&m); err != nil { - return nil, err - } - var volumes []Volume - volumesJSON, ok := m["Volumes"] - if !ok { - return volumes, nil - } - data, err := json.Marshal(volumesJSON) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, &volumes); err != nil { - return nil, err - } - return volumes, nil -} - -// CreateVolumeOptions specify parameters to the CreateVolume function. -// -// See https://goo.gl/pBUbZ9 for more details. -type CreateVolumeOptions struct { - Name string - Driver string - DriverOpts map[string]string -} - -// CreateVolume creates a volume on the server. -// -// See https://goo.gl/pBUbZ9 for more details. -func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { - resp, err := c.do("POST", "/volumes/create", doOptions{data: opts}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var volume Volume - if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { - return nil, err - } - return &volume, nil -}
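Reviewer note: volume.go above is also being dropped wholesale. A minimal sketch of the API it exposed, for anyone checking remaining call sites; the endpoint is the conventional local socket and the volume name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	volume, err := client.CreateVolume(docker.CreateVolumeOptions{
		Name:   "example-data", // hypothetical name
		Driver: "local",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(volume.Name, "mounted at", volume.Mountpoint)

	volumes, err := client.ListVolumes(docker.ListVolumesOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(volumes), "volumes on the daemon")
}
```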
-// InspectVolume returns a volume by its name. -// -// See https://goo.gl/0g9A6i for more details. -func (c *Client) InspectVolume(name string) (*Volume, error) { - resp, err := c.do("GET", "/volumes/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchVolume - } - return nil, err - } - defer resp.Body.Close() - var volume Volume - if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { - return nil, err - } - return &volume, nil -} - -// RemoveVolume removes a volume by its name. -// -// See https://goo.gl/79GNQz for more details. -func (c *Client) RemoveVolume(name string) error { - resp, err := c.do("DELETE", "/volumes/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok { - if e.Status == http.StatusNotFound { - return ErrNoSuchVolume - } - if e.Status == http.StatusConflict { - return ErrVolumeInUse - } - } - return err - } - defer resp.Body.Close() - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/volume_test.go b/vendor/github.com/fsouza/go-dockerclient/volume_test.go deleted file mode 100644 index e6bcca95..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/volume_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" -) - -func TestListVolumes(t *testing.T) { - volumesData := `[ - { - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }, - { - "Name": "foo", - "Driver": "bar", - "Mountpoint": "/var/lib/docker/volumes/bar" - } -]` - body := `{ "Volumes": ` + volumesData + ` }` - var expected []Volume - if err := json.Unmarshal([]byte(volumesData), &expected); err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - volumes, err := client.ListVolumes(ListVolumesOptions{}) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(volumes, expected) { - t.Errorf("ListVolumes: Wrong return value. Want %#v. Got %#v.", expected, volumes) - } -} - -func TestCreateVolume(t *testing.T) { - body := `{ - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }` - var expected Volume - if err := json.Unmarshal([]byte(body), &expected); err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - volume, err := client.CreateVolume( - CreateVolumeOptions{ - Name: "tardis", - Driver: "local", - DriverOpts: map[string]string{ - "foo": "bar", - }, - }, - ) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(volume, &expected) { - t.Errorf("CreateVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("CreateVolume(): Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/create")) - if req.URL.Path != u.Path { - t.Errorf("CreateVolume(): Wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } -} - -func TestInspectVolume(t *testing.T) { - body := `{ - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }` - var expected Volume - if err := json.Unmarshal([]byte(body), &expected); err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - name := "tardis" - volume, err := client.InspectVolume(name) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(volume, &expected) { - t.Errorf("InspectVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) - } - req := fakeRT.requests[0] - expectedMethod := "GET" - if req.Method != expectedMethod { - t.Errorf("InspectVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/" + name)) - if req.URL.Path != u.Path { - t.Errorf("CreateVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveVolume(t *testing.T) { - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - if err := client.RemoveVolume(name); err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveVolumeNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such volume", status: http.StatusNotFound}) - if err := client.RemoveVolume("test:"); err != ErrNoSuchVolume { - t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrNoSuchVolume, err) - } -} - -func TestRemoveVolumeInUse(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "volume in use and cannot be removed", status: http.StatusConflict}) - if err := client.RemoveVolume("test:"); err != ErrVolumeInUse { - t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrVolumeInUse, err) - } -} diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md new file mode 100644 index 00000000..c88f9b2b --- /dev/null +++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. 
+ +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +<subsystem>: <what changed> +<BLANK LINE> +<why this change was made> +<BLANK LINE> +<footer>